Diffstat (limited to 'src')
-rw-r--r-- src/api_common.cpp | 169
-rw-r--r-- src/cache.hpp | 8
-rw-r--r-- src/clblast.cpp | 208
-rw-r--r-- src/clblast_cuda.cpp | 2436
-rw-r--r-- src/clpp11.hpp | 88
-rw-r--r-- src/cupp11.hpp | 782
-rw-r--r-- src/cxpp11_common.hpp | 1
-rw-r--r-- src/database/database.cpp | 9
-rw-r--r-- src/database/database.hpp | 3
-rw-r--r-- src/database/database_structure.hpp | 6
-rw-r--r-- src/database/kernels/copy/copy_32.hpp | 7
-rw-r--r-- src/database/kernels/copy/copy_3232.hpp | 11
-rw-r--r-- src/database/kernels/copy/copy_64.hpp | 13
-rw-r--r-- src/database/kernels/copy/copy_6464.hpp | 5
-rw-r--r-- src/database/kernels/pad/pad_32.hpp | 5
-rw-r--r-- src/database/kernels/pad/pad_3232.hpp | 9
-rw-r--r-- src/database/kernels/pad/pad_64.hpp | 9
-rw-r--r-- src/database/kernels/pad/pad_6464.hpp | 5
-rw-r--r-- src/database/kernels/padtranspose/padtranspose_32.hpp | 5
-rw-r--r-- src/database/kernels/padtranspose/padtranspose_3232.hpp | 3
-rw-r--r-- src/database/kernels/padtranspose/padtranspose_64.hpp | 5
-rw-r--r-- src/database/kernels/padtranspose/padtranspose_6464.hpp | 5
-rw-r--r-- src/database/kernels/transpose/transpose_32.hpp | 7
-rw-r--r-- src/database/kernels/transpose/transpose_3232.hpp | 7
-rw-r--r-- src/database/kernels/transpose/transpose_64.hpp | 3
-rw-r--r-- src/database/kernels/transpose/transpose_6464.hpp | 9
-rw-r--r-- src/database/kernels/xaxpy/xaxpy_32.hpp | 9
-rw-r--r-- src/database/kernels/xaxpy/xaxpy_3232.hpp | 5
-rw-r--r-- src/database/kernels/xaxpy/xaxpy_64.hpp | 5
-rw-r--r-- src/database/kernels/xaxpy/xaxpy_6464.hpp | 5
-rw-r--r-- src/database/kernels/xdot/xdot_32.hpp | 5
-rw-r--r-- src/database/kernels/xdot/xdot_3232.hpp | 7
-rw-r--r-- src/database/kernels/xdot/xdot_64.hpp | 9
-rw-r--r-- src/database/kernels/xdot/xdot_6464.hpp | 9
-rw-r--r-- src/database/kernels/xgemm/xgemm_32.hpp | 9
-rw-r--r-- src/database/kernels/xgemm/xgemm_3232.hpp | 9
-rw-r--r-- src/database/kernels/xgemm/xgemm_64.hpp | 11
-rw-r--r-- src/database/kernels/xgemm/xgemm_6464.hpp | 11
-rw-r--r-- src/database/kernels/xgemm_direct/xgemm_direct_32.hpp | 12
-rw-r--r-- src/database/kernels/xgemm_direct/xgemm_direct_3232.hpp | 8
-rw-r--r-- src/database/kernels/xgemm_direct/xgemm_direct_64.hpp | 10
-rw-r--r-- src/database/kernels/xgemm_direct/xgemm_direct_6464.hpp | 8
-rw-r--r-- src/database/kernels/xgemv/xgemv_32.hpp | 5
-rw-r--r-- src/database/kernels/xgemv/xgemv_3232.hpp | 3
-rw-r--r-- src/database/kernels/xgemv/xgemv_64.hpp | 5
-rw-r--r-- src/database/kernels/xgemv/xgemv_6464.hpp | 6
-rw-r--r-- src/database/kernels/xgemv_fast/xgemv_fast_32.hpp | 7
-rw-r--r-- src/database/kernels/xgemv_fast/xgemv_fast_3232.hpp | 5
-rw-r--r-- src/database/kernels/xgemv_fast/xgemv_fast_64.hpp | 5
-rw-r--r-- src/database/kernels/xgemv_fast/xgemv_fast_6464.hpp | 6
-rw-r--r-- src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_32.hpp | 8
-rw-r--r-- src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_3232.hpp | 18
-rw-r--r-- src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_64.hpp | 6
-rw-r--r-- src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_6464.hpp | 16
-rw-r--r-- src/database/kernels/xger/xger_32.hpp | 5
-rw-r--r-- src/database/kernels/xger/xger_3232.hpp | 9
-rw-r--r-- src/database/kernels/xger/xger_64.hpp | 9
-rw-r--r-- src/database/kernels/xger/xger_6464.hpp | 9
-rw-r--r-- src/kernels/common.opencl | 33
-rw-r--r-- src/kernels/level2/level2.opencl | 2
-rw-r--r-- src/kernels/level3/invert_diagonal_blocks.opencl | 6
-rw-r--r-- src/kernels/level3/transpose_fast.opencl | 60
-rw-r--r-- src/kernels/level3/transpose_pad.opencl | 4
-rw-r--r-- src/kernels/level3/xgemm_direct_batched.opencl | 16
-rw-r--r-- src/kernels/level3/xgemm_direct_part1.opencl | 4
-rw-r--r-- src/kernels/level3/xgemm_direct_part2.opencl | 12
-rw-r--r-- src/kernels/level3/xgemm_direct_part3.opencl | 18
-rw-r--r-- src/kernels/level3/xgemm_part1.opencl | 8
-rw-r--r-- src/kernels/level3/xgemm_part3.opencl | 15
-rw-r--r-- src/kernels/opencl_to_cuda.h | 90
-rw-r--r-- src/routine.cpp | 39
-rw-r--r-- src/routine.hpp | 1
-rw-r--r-- src/routines/common.hpp | 3
-rw-r--r-- src/routines/level2/xtrsv.cpp | 11
-rw-r--r-- src/routines/level3/xgemm.cpp | 30
-rw-r--r-- src/routines/level3/xtrsm.cpp | 110
-rw-r--r-- src/routines/levelx/xaxpybatched.cpp | 6
-rw-r--r-- src/routines/levelx/xgemmbatched.cpp | 22
-rw-r--r-- src/routines/routines.hpp | 76
-rw-r--r-- src/tuning/kernels/copy_fast.cpp | 113
-rw-r--r-- src/tuning/kernels/copy_pad.cpp | 113
-rw-r--r-- src/tuning/kernels/transpose_fast.cpp | 113
-rw-r--r-- src/tuning/kernels/transpose_pad.cpp | 111
-rw-r--r-- src/tuning/kernels/xaxpy.cpp | 110
-rw-r--r-- src/tuning/kernels/xdot.cpp | 105
-rw-r--r-- src/tuning/kernels/xgemm.cpp | 202
-rw-r--r-- src/tuning/kernels/xgemm_direct.cpp | 184
-rw-r--r-- src/tuning/kernels/xgemv.cpp | 131
-rw-r--r-- src/tuning/kernels/xger.cpp | 112
-rw-r--r-- src/tuning/tuning.hpp | 147
-rw-r--r-- src/utilities/buffer_test.hpp | 2
-rw-r--r-- src/utilities/clblast_exceptions.cpp | 2
-rw-r--r-- src/utilities/clblast_exceptions.hpp | 3
-rw-r--r-- src/utilities/utilities.cpp | 29
-rw-r--r-- src/utilities/utilities.hpp | 13
95 files changed, 4849 insertions(+), 1279 deletions(-)
diff --git a/src/api_common.cpp b/src/api_common.cpp
new file mode 100644
index 00000000..0d387cd9
--- /dev/null
+++ b/src/api_common.cpp
@@ -0,0 +1,169 @@
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the common (non-OpenCL-specific) functions of the CLBlast API.
+//
+// =================================================================================================
+
+#include <string>
+
+#include "utilities/utilities.hpp"
+#include "cache.hpp"
+#include "routines/routines.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Clears the cache of stored binaries
+StatusCode ClearCache() {
+ try {
+ ProgramCache::Instance().Invalidate();
+ BinaryCache::Instance().Invalidate();
+ } catch (...) { return DispatchException(); }
+ return StatusCode::kSuccess;
+}
+
+template <typename Real, typename Complex>
+void FillCacheForPrecision(Queue &queue) {
+ try {
+
+ // Runs all the level 1 set-up functions
+ Xswap<Real>(queue, nullptr); Xswap<Complex>(queue, nullptr);
+ Xswap<Real>(queue, nullptr); Xswap<Complex>(queue, nullptr);
+ Xscal<Real>(queue, nullptr); Xscal<Complex>(queue, nullptr);
+ Xcopy<Real>(queue, nullptr); Xcopy<Complex>(queue, nullptr);
+ Xaxpy<Real>(queue, nullptr); Xaxpy<Complex>(queue, nullptr);
+ Xdot<Real>(queue, nullptr);
+ Xdotu<Complex>(queue, nullptr);
+ Xdotc<Complex>(queue, nullptr);
+ Xnrm2<Real>(queue, nullptr); Xnrm2<Complex>(queue, nullptr);
+ Xasum<Real>(queue, nullptr); Xasum<Complex>(queue, nullptr);
+ Xsum<Real>(queue, nullptr); Xsum<Complex>(queue, nullptr);
+ Xamax<Real>(queue, nullptr); Xamax<Complex>(queue, nullptr);
+ Xmax<Real>(queue, nullptr); Xmax<Complex>(queue, nullptr);
+ Xmin<Real>(queue, nullptr); Xmin<Complex>(queue, nullptr);
+
+ // Runs all the level 2 set-up functions
+ Xgemv<Real>(queue, nullptr); Xgemv<Complex>(queue, nullptr);
+ Xgbmv<Real>(queue, nullptr); Xgbmv<Complex>(queue, nullptr);
+ Xhemv<Complex>(queue, nullptr);
+ Xhbmv<Complex>(queue, nullptr);
+ Xhpmv<Complex>(queue, nullptr);
+ Xsymv<Real>(queue, nullptr);
+ Xsbmv<Real>(queue, nullptr);
+ Xspmv<Real>(queue, nullptr);
+ Xtrmv<Real>(queue, nullptr); Xtrmv<Complex>(queue, nullptr);
+ Xtbmv<Real>(queue, nullptr); Xtbmv<Complex>(queue, nullptr);
+ Xtpmv<Real>(queue, nullptr); Xtpmv<Complex>(queue, nullptr);
+ Xger<Real>(queue, nullptr);
+ Xgeru<Complex>(queue, nullptr);
+ Xgerc<Complex>(queue, nullptr);
+ Xher<Complex,Real>(queue, nullptr);
+ Xhpr<Complex,Real>(queue, nullptr);
+ Xher2<Complex>(queue, nullptr);
+ Xhpr2<Complex>(queue, nullptr);
+ Xsyr<Real>(queue, nullptr);
+ Xspr<Real>(queue, nullptr);
+ Xsyr2<Real>(queue, nullptr);
+ Xspr2<Real>(queue, nullptr);
+
+ // Runs all the level 3 set-up functions
+ Xgemm<Real>(queue, nullptr); Xgemm<Complex>(queue, nullptr);
+ Xsymm<Real>(queue, nullptr); Xsymm<Complex>(queue, nullptr);
+ Xhemm<Complex>(queue, nullptr);
+ Xsyrk<Real>(queue, nullptr); Xsyrk<Complex>(queue, nullptr);
+ Xherk<Complex,Real>(queue, nullptr);
+ Xsyr2k<Real>(queue, nullptr); Xsyr2k<Complex>(queue, nullptr);
+ Xher2k<Complex,Real>(queue, nullptr);
+ Xtrmm<Real>(queue, nullptr); Xtrmm<Complex>(queue, nullptr);
+
+ // Runs all the non-BLAS set-up functions
+ Xomatcopy<Real>(queue, nullptr); Xomatcopy<Complex>(queue, nullptr);
+
+ } catch(const RuntimeErrorCode &e) {
+ if (e.status() != StatusCode::kNoDoublePrecision &&
+ e.status() != StatusCode::kNoHalfPrecision) {
+ throw;
+ }
+ }
+}
+
+// Fills the cache with all binaries for a specific device
+// TODO: Add half-precision FP16 set-up calls
+StatusCode FillCache(const RawDeviceID device) {
+ try {
+
+ // Creates a sample context and queue to match the normal routine calling conventions
+ auto device_cpp = Device(device);
+ auto context = Context(device_cpp);
+ auto queue = Queue(context, device_cpp);
+
+ FillCacheForPrecision<float, float2>(queue);
+ FillCacheForPrecision<double, double2>(queue);
+
+ } catch (...) { return DispatchException(); }
+ return StatusCode::kSuccess;
+}
+
+// =================================================================================================
+
+// Overrides the tuning parameters for this device-precision-kernel combination
+StatusCode OverrideParameters(const RawDeviceID device, const std::string &kernel_name,
+ const Precision precision,
+ const std::unordered_map<std::string,size_t> &parameters) {
+ try {
+
+ // Retrieves the device name
+ const auto device_cpp = Device(device);
+ const auto platform_id = device_cpp.PlatformID();
+ const auto device_name = GetDeviceName(device_cpp);
+
+ // Retrieves the current database values to verify whether the new ones are complete
+ auto in_cache = false;
+ auto current_database = DatabaseCache::Instance().Get(DatabaseKeyRef{platform_id, device, precision, kernel_name}, &in_cache);
+ if (!in_cache) {
+ log_debug("Searching database for kernel '" + kernel_name + "'");
+ current_database = Database(device_cpp, kernel_name, precision, {});
+ }
+
+ // Verifies the parameters size
+ const auto current_parameter_names = current_database.GetParameterNames();
+ if (current_parameter_names.size() != parameters.size()) {
+ return StatusCode::kMissingOverrideParameter;
+ }
+
+ // Retrieves the names and values separately and in the same order as the existing database
+ auto parameter_values = database::Params{0};
+ auto i = size_t{0};
+ for (const auto &current_param : current_parameter_names) {
+ if (parameters.find(current_param) == parameters.end()) {
+ return StatusCode::kMissingOverrideParameter;
+ }
+ const auto parameter_value = parameters.at(current_param);
+ parameter_values[i] = parameter_value;
+ ++i;
+ }
+
+ // Creates a small custom database based on the provided parameters
+ const auto database_device = database::DatabaseDevice{database::kDeviceNameDefault, parameter_values};
+ const auto database_architecture = database::DatabaseArchitecture{"default", {database_device}};
+ const auto database_vendor = database::DatabaseVendor{database::kDeviceTypeAll, "default", {database_architecture}};
+ const auto database_entry = database::DatabaseEntry{kernel_name, precision, current_parameter_names, {database_vendor}};
+ const auto database_entries = std::vector<database::DatabaseEntry>{database_entry};
+ const auto database = Database(device_cpp, kernel_name, precision, database_entries);
+
+ // Removes the old database entry and stores the new one in the cache
+ DatabaseCache::Instance().Remove(DatabaseKey{platform_id, device, precision, kernel_name});
+ DatabaseCache::Instance().Store(DatabaseKey{platform_id, device, precision, kernel_name}, Database(database));
+
+ } catch (...) { return DispatchException(); }
+ return StatusCode::kSuccess;
+}
+
+// =================================================================================================
+} // namespace clblast
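
The new file above consolidates cache control and parameter overriding into one backend-agnostic translation unit. Note one behavioural change relative to the version removed from clblast.cpp below: when the database entry is not yet cached, OverrideParameters now falls back to a database search instead of returning kInvalidOverrideKernel, and it packs the new values in the database's own parameter order. A minimal usage sketch of the three entry points (not part of this patch; it assumes the OpenCL back-end where RawDeviceID is cl_device_id, and the "MWG"/"NWG"/"KWG" keys are an illustrative subset, not the complete Xgemm parameter set):

#include <string>
#include <unordered_map>
#include <clblast.h>

void warm_and_tune(cl_device_id device) {
  // Pre-compiles all float/float2 and double/double2 kernels for this device
  clblast::FillCache(device);

  // Overrides the tuning parameters of one kernel. Every parameter of the
  // kernel must be supplied, otherwise kMissingOverrideParameter is returned.
  const auto parameters = std::unordered_map<std::string, size_t>{
      {"MWG", 64}, {"NWG", 64}, {"KWG", 16}};  // hypothetical subset
  clblast::OverrideParameters(device, "Xgemm", clblast::Precision::kSingle,
                              parameters);

  // Drops all cached programs and binaries again
  clblast::ClearCache();
}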
diff --git a/src/cache.hpp b/src/cache.hpp
index f6a948b6..1c8c9d4c 100644
--- a/src/cache.hpp
+++ b/src/cache.hpp
@@ -80,8 +80,8 @@ extern template std::string BinaryCache::Get(const BinaryKeyRef &, bool *) const
// The key struct for the cache of compiled OpenCL programs (context-dependent)
// Order of fields: context, device_id, precision, routine_name (smaller fields first)
-typedef std::tuple<cl_context, cl_device_id, Precision, std::string> ProgramKey;
-typedef std::tuple<const cl_context &, const cl_device_id &, const Precision &, const std::string &> ProgramKeyRef;
+typedef std::tuple<RawContext, RawDeviceID, Precision, std::string> ProgramKey;
+typedef std::tuple<const RawContext &, const RawDeviceID &, const Precision &, const std::string &> ProgramKeyRef;
typedef Cache<ProgramKey, Program> ProgramCache;
@@ -94,8 +94,8 @@ class Database;
// The key struct for the cache of database maps.
// Order of fields: platform_id, device_id, precision, kernel_name (smaller fields first)
-typedef std::tuple<cl_platform_id, cl_device_id, Precision, std::string> DatabaseKey;
-typedef std::tuple<const cl_platform_id &, const cl_device_id &, const Precision &, const std::string &> DatabaseKeyRef;
+typedef std::tuple<RawPlatformID, RawDeviceID, Precision, std::string> DatabaseKey;
+typedef std::tuple<const RawPlatformID &, const RawDeviceID &, const Precision &, const std::string &> DatabaseKeyRef;
typedef Cache<DatabaseKey, Database> DatabaseCache;
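
These typedefs are what lets api_common.cpp stay backend-agnostic: the raw cl_* handles are hidden behind neutral aliases that each back-end header defines for itself. The alias definitions are not shown in this hunk; a sketch of what clpp11.hpp and cupp11.hpp presumably provide (names inferred from this diff, the exact definitions are an assumption):

// clpp11.hpp (OpenCL back-end):
using RawPlatformID = cl_platform_id;
using RawDeviceID = cl_device_id;
using RawContext = cl_context;

// cupp11.hpp (CUDA back-end), hypothetical equivalents:
using RawPlatformID = size_t;  // CUDA exposes no platform handle
using RawDeviceID = CUdevice;
using RawContext = CUcontext;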
diff --git a/src/clblast.cpp b/src/clblast.cpp
index 3983e5fc..7d2c2cef 100644
--- a/src/clblast.cpp
+++ b/src/clblast.cpp
@@ -15,67 +15,9 @@
#include <string>
-#include "cache.hpp"
+#include "routines/routines.hpp"
#include "clblast.h"
-// BLAS level-1 includes
-#include "routines/level1/xswap.hpp"
-#include "routines/level1/xscal.hpp"
-#include "routines/level1/xcopy.hpp"
-#include "routines/level1/xaxpy.hpp"
-#include "routines/level1/xdot.hpp"
-#include "routines/level1/xdotu.hpp"
-#include "routines/level1/xdotc.hpp"
-#include "routines/level1/xnrm2.hpp"
-#include "routines/level1/xasum.hpp"
-#include "routines/level1/xsum.hpp" // non-BLAS routine
-#include "routines/level1/xamax.hpp"
-#include "routines/level1/xamin.hpp" // non-BLAS routine
-#include "routines/level1/xmax.hpp" // non-BLAS routine
-#include "routines/level1/xmin.hpp" // non-BLAS routine
-
-// BLAS level-2 includes
-#include "routines/level2/xgemv.hpp"
-#include "routines/level2/xgbmv.hpp"
-#include "routines/level2/xhemv.hpp"
-#include "routines/level2/xhbmv.hpp"
-#include "routines/level2/xhpmv.hpp"
-#include "routines/level2/xsymv.hpp"
-#include "routines/level2/xsbmv.hpp"
-#include "routines/level2/xspmv.hpp"
-#include "routines/level2/xtrmv.hpp"
-#include "routines/level2/xtbmv.hpp"
-#include "routines/level2/xtpmv.hpp"
-#include "routines/level2/xtrsv.hpp"
-#include "routines/level2/xger.hpp"
-#include "routines/level2/xgeru.hpp"
-#include "routines/level2/xgerc.hpp"
-#include "routines/level2/xher.hpp"
-#include "routines/level2/xhpr.hpp"
-#include "routines/level2/xher2.hpp"
-#include "routines/level2/xhpr2.hpp"
-#include "routines/level2/xsyr.hpp"
-#include "routines/level2/xspr.hpp"
-#include "routines/level2/xsyr2.hpp"
-#include "routines/level2/xspr2.hpp"
-
-// BLAS level-3 includes
-#include "routines/level3/xgemm.hpp"
-#include "routines/level3/xsymm.hpp"
-#include "routines/level3/xhemm.hpp"
-#include "routines/level3/xsyrk.hpp"
-#include "routines/level3/xherk.hpp"
-#include "routines/level3/xsyr2k.hpp"
-#include "routines/level3/xher2k.hpp"
-#include "routines/level3/xtrmm.hpp"
-#include "routines/level3/xtrsm.hpp"
-
-// Level-x includes (non-BLAS)
-#include "routines/levelx/xomatcopy.hpp"
-#include "routines/levelx/xim2col.hpp"
-#include "routines/levelx/xaxpybatched.hpp"
-#include "routines/levelx/xgemmbatched.hpp"
-
namespace clblast {
// =================================================================================================
@@ -2389,154 +2331,6 @@ template StatusCode PUBLIC_API GemmBatched<half>(const Layout, const Transpose,
cl_mem, const size_t*, const size_t,
const size_t,
cl_command_queue*, cl_event*);
-// =================================================================================================
-
-// Clears the cache of stored binaries
-StatusCode ClearCache() {
- try {
- ProgramCache::Instance().Invalidate();
- BinaryCache::Instance().Invalidate();
- } catch (...) { return DispatchException(); }
- return StatusCode::kSuccess;
-}
-
-template <typename Real, typename Complex>
-void FillCacheForPrecision(Queue &queue) {
- try {
-
- // Runs all the level 1 set-up functions
- Xswap<Real>(queue, nullptr); Xswap<Complex>(queue, nullptr);
- Xswap<Real>(queue, nullptr); Xswap<Complex>(queue, nullptr);
- Xscal<Real>(queue, nullptr); Xscal<Complex>(queue, nullptr);
- Xcopy<Real>(queue, nullptr); Xcopy<Complex>(queue, nullptr);
- Xaxpy<Real>(queue, nullptr); Xaxpy<Complex>(queue, nullptr);
- Xdot<Real>(queue, nullptr);
- Xdotu<Complex>(queue, nullptr);
- Xdotc<Complex>(queue, nullptr);
- Xnrm2<Real>(queue, nullptr); Xnrm2<Complex>(queue, nullptr);
- Xasum<Real>(queue, nullptr); Xasum<Complex>(queue, nullptr);
- Xsum<Real>(queue, nullptr); Xsum<Complex>(queue, nullptr);
- Xamax<Real>(queue, nullptr); Xamax<Complex>(queue, nullptr);
- Xmax<Real>(queue, nullptr); Xmax<Complex>(queue, nullptr);
- Xmin<Real>(queue, nullptr); Xmin<Complex>(queue, nullptr);
-
- // Runs all the level 2 set-up functions
- Xgemv<Real>(queue, nullptr); Xgemv<Complex>(queue, nullptr);
- Xgbmv<Real>(queue, nullptr); Xgbmv<Complex>(queue, nullptr);
- Xhemv<Complex>(queue, nullptr);
- Xhbmv<Complex>(queue, nullptr);
- Xhpmv<Complex>(queue, nullptr);
- Xsymv<Real>(queue, nullptr);
- Xsbmv<Real>(queue, nullptr);
- Xspmv<Real>(queue, nullptr);
- Xtrmv<Real>(queue, nullptr); Xtrmv<Complex>(queue, nullptr);
- Xtbmv<Real>(queue, nullptr); Xtbmv<Complex>(queue, nullptr);
- Xtpmv<Real>(queue, nullptr); Xtpmv<Complex>(queue, nullptr);
- Xger<Real>(queue, nullptr);
- Xgeru<Complex>(queue, nullptr);
- Xgerc<Complex>(queue, nullptr);
- Xher<Complex,Real>(queue, nullptr);
- Xhpr<Complex,Real>(queue, nullptr);
- Xher2<Complex>(queue, nullptr);
- Xhpr2<Complex>(queue, nullptr);
- Xsyr<Real>(queue, nullptr);
- Xspr<Real>(queue, nullptr);
- Xsyr2<Real>(queue, nullptr);
- Xspr2<Real>(queue, nullptr);
-
- // Runs all the level 3 set-up functions
- Xgemm<Real>(queue, nullptr); Xgemm<Complex>(queue, nullptr);
- Xsymm<Real>(queue, nullptr); Xsymm<Complex>(queue, nullptr);
- Xhemm<Complex>(queue, nullptr);
- Xsyrk<Real>(queue, nullptr); Xsyrk<Complex>(queue, nullptr);
- Xherk<Complex,Real>(queue, nullptr);
- Xsyr2k<Real>(queue, nullptr); Xsyr2k<Complex>(queue, nullptr);
- Xher2k<Complex,Real>(queue, nullptr);
- Xtrmm<Real>(queue, nullptr); Xtrmm<Complex>(queue, nullptr);
-
- // Runs all the non-BLAS set-up functions
- Xomatcopy<Real>(queue, nullptr); Xomatcopy<Complex>(queue, nullptr);
-
- } catch(const RuntimeErrorCode &e) {
- if (e.status() != StatusCode::kNoDoublePrecision &&
- e.status() != StatusCode::kNoHalfPrecision) {
- throw;
- }
- }
-}
-
-// Fills the cache with all binaries for a specific device
-// TODO: Add half-precision FP16 set-up calls
-StatusCode FillCache(const cl_device_id device) {
- try {
-
- // Creates a sample context and queue to match the normal routine calling conventions
- auto device_cpp = Device(device);
- auto context = Context(device_cpp);
- auto queue = Queue(context, device_cpp);
-
- FillCacheForPrecision<float, float2>(queue);
- FillCacheForPrecision<double, double2>(queue);
-
- } catch (...) { return DispatchException(); }
- return StatusCode::kSuccess;
-}
-
-// =================================================================================================
-
-// Overrides the tuning parameters for this device-precision-kernel combination
-StatusCode OverrideParameters(const cl_device_id device, const std::string &kernel_name,
- const Precision precision,
- const std::unordered_map<std::string,size_t> &parameters) {
- try {
-
- // Retrieves the device name
- const auto device_cpp = Device(device);
- const auto platform_id = device_cpp.Platform();
- const auto device_name = GetDeviceName(device_cpp);
-
- // Retrieves the current database values to verify whether the new ones are complete
- auto in_cache = false;
- const auto current_database = DatabaseCache::Instance().Get(DatabaseKeyRef{platform_id, device, precision, kernel_name}, &in_cache);
- if (!in_cache) { return StatusCode::kInvalidOverrideKernel; }
- for (const auto &current_param : current_database.GetParameterNames()) {
- if (parameters.find(current_param) == parameters.end()) {
- return StatusCode::kMissingOverrideParameter;
- }
- }
-
- // Clears the existing program & binary cache for routines with the target kernel
- const auto routine_names = Routine::routines_by_kernel.at(kernel_name);
- for (const auto &routine_name : routine_names) {
- ProgramCache::Instance().RemoveBySubset<1, 2>(ProgramKey{nullptr, device, precision, routine_name});
- BinaryCache::Instance().Remove(BinaryKey{precision, routine_name, device_name});
- }
-
- // Retrieves the names and values separately
- auto parameter_values = database::Params{0};
- auto parameter_names = std::vector<std::string>();
- auto i = size_t{0};
- for (const auto &parameter : parameters) {
- parameter_values[i] = parameter.second;
- parameter_names.push_back(parameter.first);
- ++i;
- }
-
- // Creates a small custom database based on the provided parameters
- const auto database_device = database::DatabaseDevice{database::kDeviceNameDefault, parameter_values};
- const auto database_architecture = database::DatabaseArchitecture{"default", {database_device}};
- const auto database_vendor = database::DatabaseVendor{database::kDeviceTypeAll, "default", {database_architecture}};
- const auto database_entry = database::DatabaseEntry{kernel_name, precision, parameter_names, {database_vendor}};
- const auto database_entries = std::vector<database::DatabaseEntry>{database_entry};
- const auto database = Database(device_cpp, kernel_name, precision, database_entries);
-
- // Removes the old database entry and stores the new one in the cache
- DatabaseCache::Instance().Remove(DatabaseKey{platform_id, device, precision, kernel_name});
- DatabaseCache::Instance().Store(DatabaseKey{platform_id, device, precision, kernel_name}, Database(database));
-
- } catch (...) { return DispatchException(); }
- return StatusCode::kSuccess;
-}
// =================================================================================================
} // namespace clblast
diff --git a/src/clblast_cuda.cpp b/src/clblast_cuda.cpp
new file mode 100644
index 00000000..0e3d949d
--- /dev/null
+++ b/src/clblast_cuda.cpp
@@ -0,0 +1,2436 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements all the BLAS API calls (CUDA version). In all cases, it does little more
+// than create a new object of the appropriate type and call the main routine on that object.
+// It forwards all status codes to the caller.
+//
+// =================================================================================================
+
+#include <string>
+
+#include "routines/routines.hpp"
+#include "clblast_cuda.h"
+
+namespace clblast {
+
+// =================================================================================================
+// BLAS level-1 (vector-vector) routines
+// =================================================================================================
+
+// Generate Givens plane rotation: SROTG/DROTG
+template <typename T>
+StatusCode Rotg(CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice) {
+ return StatusCode::kNotImplemented;
+}
+template StatusCode PUBLIC_API Rotg<float>(CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Rotg<double>(CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
+// Generate modified Givens plane rotation: SROTMG/DROTMG
+template <typename T>
+StatusCode Rotmg(CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice) {
+ return StatusCode::kNotImplemented;
+}
+template StatusCode PUBLIC_API Rotmg<float>(CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Rotmg<double>(CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
+// Apply Givens plane rotation: SROT/DROT
+template <typename T>
+StatusCode Rot(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const T,
+ const T,
+ const CUcontext, const CUdevice) {
+ return StatusCode::kNotImplemented;
+}
+template StatusCode PUBLIC_API Rot<float>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const float,
+ const float,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Rot<double>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const double,
+ const double,
+ const CUcontext, const CUdevice);
+
+// Apply modified Givens plane rotation: SROTM/DROTM
+template <typename T>
+StatusCode Rotm(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice) {
+ return StatusCode::kNotImplemented;
+}
+template StatusCode PUBLIC_API Rotm<float>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Rotm<double>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
+// Swap two vectors: SSWAP/DSWAP/CSWAP/ZSWAP/HSWAP
+template <typename T>
+StatusCode Swap(const size_t n,
+ CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xswap<T>(queue_cpp, nullptr);
+ routine.DoSwap(n,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Swap<float>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Swap<double>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Swap<float2>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Swap<double2>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Swap<half>(const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
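
Every routine in this file follows the Swap pattern above: wrap the raw CUcontext/CUdevice in the C++ classes from cupp11.hpp, build a Queue, and run the existing routine implementation on it. A self-contained usage sketch against the CUDA driver API (not part of this patch; error checking omitted):

#include <cuda.h>
#include <clblast_cuda.h>

int main() {
  cuInit(0);
  CUdevice device;
  cuDeviceGet(&device, 0);
  CUcontext context;
  cuCtxCreate(&context, 0, device);

  // Allocates two device vectors of n floats (left uninitialised here;
  // real code would fill them with cuMemcpyHtoD)
  const size_t n = 1024;
  CUdeviceptr x, y;
  cuMemAlloc(&x, n * sizeof(float));
  cuMemAlloc(&y, n * sizeof(float));

  // Swaps the contents of x and y (zero offsets, unit increments)
  const auto status = clblast::Swap<float>(n, x, 0, 1, y, 0, 1, context, device);

  cuMemFree(x); cuMemFree(y);
  cuCtxDestroy(context);
  return status == clblast::StatusCode::kSuccess ? 0 : 1;
}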
+// Vector scaling: SSCAL/DSCAL/CSCAL/ZSCAL/HSCAL
+template <typename T>
+StatusCode Scal(const size_t n,
+ const T alpha,
+ CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xscal<T>(queue_cpp, nullptr);
+ routine.DoScal(n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Scal<float>(const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Scal<double>(const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Scal<float2>(const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Scal<double2>(const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Scal<half>(const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Vector copy: SCOPY/DCOPY/CCOPY/ZCOPY/HCOPY
+template <typename T>
+StatusCode Copy(const size_t n,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xcopy<T>(queue_cpp, nullptr);
+ routine.DoCopy(n,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Copy<float>(const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Copy<double>(const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Copy<float2>(const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Copy<double2>(const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Copy<half>(const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Vector-times-constant plus vector: SAXPY/DAXPY/CAXPY/ZAXPY/HAXPY
+template <typename T>
+StatusCode Axpy(const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xaxpy<T>(queue_cpp, nullptr);
+ routine.DoAxpy(n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Axpy<float>(const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Axpy<double>(const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Axpy<float2>(const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Axpy<double2>(const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Axpy<half>(const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Dot product of two vectors: SDOT/DDOT/HDOT
+template <typename T>
+StatusCode Dot(const size_t n,
+ CUdeviceptr dot_buffer, const size_t dot_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xdot<T>(queue_cpp, nullptr);
+ routine.DoDot(n,
+ Buffer<T>(dot_buffer), dot_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Dot<float>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Dot<double>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Dot<half>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Dot product of two complex vectors: CDOTU/ZDOTU
+template <typename T>
+StatusCode Dotu(const size_t n,
+ CUdeviceptr dot_buffer, const size_t dot_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xdotu<T>(queue_cpp, nullptr);
+ routine.DoDotu(n,
+ Buffer<T>(dot_buffer), dot_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Dotu<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Dotu<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Dot product of two complex vectors, one conjugated: CDOTC/ZDOTC
+template <typename T>
+StatusCode Dotc(const size_t n,
+ CUdeviceptr dot_buffer, const size_t dot_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xdotc<T>(queue_cpp, nullptr);
+ routine.DoDotc(n,
+ Buffer<T>(dot_buffer), dot_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Dotc<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Dotc<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Euclidean norm of a vector: SNRM2/DNRM2/ScNRM2/DzNRM2/HNRM2
+template <typename T>
+StatusCode Nrm2(const size_t n,
+ CUdeviceptr nrm2_buffer, const size_t nrm2_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xnrm2<T>(queue_cpp, nullptr);
+ routine.DoNrm2(n,
+ Buffer<T>(nrm2_buffer), nrm2_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Nrm2<float>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Nrm2<double>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Nrm2<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Nrm2<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Nrm2<half>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Absolute sum of values in a vector: SASUM/DASUM/ScASUM/DzASUM/HASUM
+template <typename T>
+StatusCode Asum(const size_t n,
+ CUdeviceptr asum_buffer, const size_t asum_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xasum<T>(queue_cpp, nullptr);
+ routine.DoAsum(n,
+ Buffer<T>(asum_buffer), asum_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Asum<float>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Asum<double>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Asum<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Asum<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Asum<half>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Sum of values in a vector (non-BLAS function): SSUM/DSUM/ScSUM/DzSUM/HSUM
+template <typename T>
+StatusCode Sum(const size_t n,
+ CUdeviceptr sum_buffer, const size_t sum_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xsum<T>(queue_cpp, nullptr);
+ routine.DoSum(n,
+ Buffer<T>(sum_buffer), sum_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Sum<float>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Sum<double>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Sum<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Sum<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Sum<half>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Index of absolute maximum value in a vector: iSAMAX/iDAMAX/iCAMAX/iZAMAX/iHAMAX
+template <typename T>
+StatusCode Amax(const size_t n,
+ CUdeviceptr imax_buffer, const size_t imax_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xamax<T>(queue_cpp, nullptr);
+ routine.DoAmax(n,
+ Buffer<unsigned int>(imax_buffer), imax_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Amax<float>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Amax<double>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Amax<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Amax<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Amax<half>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
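
One detail that is easy to miss in the index routines (Amax above, and Amin/Max/Min below): the result buffer is typed Buffer<unsigned int> regardless of T, so the output allocation must be sized for an unsigned int, not for T. A sketch reusing the setup from the Swap example earlier (imax is a hypothetical name):

CUdeviceptr imax;
cuMemAlloc(&imax, sizeof(unsigned int));
clblast::Amax<float>(n, imax, 0, x, 0, 1, context, device);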
+// Index of absolute minimum value in a vector (non-BLAS function): iSAMIN/iDAMIN/iCAMIN/iZAMIN/iHAMIN
+template <typename T>
+StatusCode Amin(const size_t n,
+ CUdeviceptr imin_buffer, const size_t imin_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xamin<T>(queue_cpp, nullptr);
+ routine.DoAmin(n,
+ Buffer<unsigned int>(imin_buffer), imin_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Amin<float>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Amin<double>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Amin<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Amin<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Amin<half>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Index of maximum value in a vector (non-BLAS function): iSMAX/iDMAX/iCMAX/iZMAX/iHMAX
+template <typename T>
+StatusCode Max(const size_t n,
+ CUdeviceptr imax_buffer, const size_t imax_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xmax<T>(queue_cpp, nullptr);
+ routine.DoMax(n,
+ Buffer<unsigned int>(imax_buffer), imax_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Max<float>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Max<double>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Max<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Max<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Max<half>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Index of minimum value in a vector (non-BLAS function): iSMIN/iDMIN/iCMIN/iZMIN/iHMIN
+template <typename T>
+StatusCode Min(const size_t n,
+ CUdeviceptr imin_buffer, const size_t imin_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xmin<T>(queue_cpp, nullptr);
+ routine.DoMin(n,
+ Buffer<unsigned int>(imin_buffer), imin_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Min<float>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Min<double>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Min<float2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Min<double2>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Min<half>(const size_t,
+ CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// =================================================================================================
+// BLAS level-2 (matrix-vector) routines
+// =================================================================================================
+
+// General matrix-vector multiplication: SGEMV/DGEMV/CGEMV/ZGEMV/HGEMV
+template <typename T>
+StatusCode Gemv(const Layout layout, const Transpose a_transpose,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xgemv<T>(queue_cpp, nullptr);
+ routine.DoGemv(layout, a_transpose,
+ m, n,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ beta,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Gemv<float>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gemv<double>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gemv<float2>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gemv<double2>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gemv<half>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
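
The level-2 wrappers differ only in their BLAS arguments; the trailing context/device pair and the Queue construction are identical to level 1. For instance, a single-precision y = alpha*A*x + beta*y on previously allocated device memory could look like this (sketch; a_buffer, x_buffer and y_buffer are hypothetical CUdeviceptr handles from the earlier setup):

// A is m x n2 and row-major, so its leading dimension equals n2
const size_t m = 256, n2 = 512;
const auto status = clblast::Gemv<float>(
    clblast::Layout::kRowMajor, clblast::Transpose::kNo,
    m, n2,
    1.0f,             // alpha
    a_buffer, 0, n2,  // a_ld
    x_buffer, 0, 1,
    0.0f,             // beta
    y_buffer, 0, 1,
    context, device);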
+// General banded matrix-vector multiplication: SGBMV/DGBMV/CGBMV/ZGBMV/HGBMV
+template <typename T>
+StatusCode Gbmv(const Layout layout, const Transpose a_transpose,
+ const size_t m, const size_t n, const size_t kl, const size_t ku,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xgbmv<T>(queue_cpp, nullptr);
+ routine.DoGbmv(layout, a_transpose,
+ m, n, kl, ku,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ beta,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Gbmv<float>(const Layout, const Transpose,
+ const size_t, const size_t, const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gbmv<double>(const Layout, const Transpose,
+ const size_t, const size_t, const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gbmv<float2>(const Layout, const Transpose,
+ const size_t, const size_t, const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gbmv<double2>(const Layout, const Transpose,
+ const size_t, const size_t, const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gbmv<half>(const Layout, const Transpose,
+ const size_t, const size_t, const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Hermitian matrix-vector multiplication: CHEMV/ZHEMV
+template <typename T>
+StatusCode Hemv(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xhemv<T>(queue_cpp, nullptr);
+ routine.DoHemv(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ beta,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Hemv<float2>(const Layout, const Triangle,
+ const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Hemv<double2>(const Layout, const Triangle,
+ const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Hermitian banded matrix-vector multiplication: CHBMV/ZHBMV
+template <typename T>
+StatusCode Hbmv(const Layout layout, const Triangle triangle,
+ const size_t n, const size_t k,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xhbmv<T>(queue_cpp, nullptr);
+ routine.DoHbmv(layout, triangle,
+ n, k,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ beta,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Hbmv<float2>(const Layout, const Triangle,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Hbmv<double2>(const Layout, const Triangle,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Hermitian packed matrix-vector multiplication: CHPMV/ZHPMV
+template <typename T>
+StatusCode Hpmv(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr ap_buffer, const size_t ap_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xhpmv<T>(queue_cpp, nullptr);
+ routine.DoHpmv(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(ap_buffer), ap_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ beta,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Hpmv<float2>(const Layout, const Triangle,
+ const size_t,
+ const float2,
+ const CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Hpmv<double2>(const Layout, const Triangle,
+ const size_t,
+ const double2,
+ const CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Symmetric matrix-vector multiplication: SSYMV/DSYMV/HSYMV
+template <typename T>
+StatusCode Symv(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xsymv<T>(queue_cpp, nullptr);
+ routine.DoSymv(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ beta,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Symv<float>(const Layout, const Triangle,
+ const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Symv<double>(const Layout, const Triangle,
+ const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Symv<half>(const Layout, const Triangle,
+ const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Symmetric banded matrix-vector multiplication: SSBMV/DSBMV/HSBMV
+template <typename T>
+StatusCode Sbmv(const Layout layout, const Triangle triangle,
+ const size_t n, const size_t k,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xsbmv<T>(queue_cpp, nullptr);
+ routine.DoSbmv(layout, triangle,
+ n, k,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ beta,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Sbmv<float>(const Layout, const Triangle,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Sbmv<double>(const Layout, const Triangle,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Sbmv<half>(const Layout, const Triangle,
+ const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Symmetric packed matrix-vector multiplication: SSPMV/DSPMV/HSPMV
+template <typename T>
+StatusCode Spmv(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr ap_buffer, const size_t ap_offset,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xspmv<T>(queue_cpp, nullptr);
+ routine.DoSpmv(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(ap_buffer), ap_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ beta,
+ Buffer<T>(y_buffer), y_offset, y_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Spmv<float>(const Layout, const Triangle,
+ const size_t,
+ const float,
+ const CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Spmv<double>(const Layout, const Triangle,
+ const size_t,
+ const double,
+ const CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Spmv<half>(const Layout, const Triangle,
+ const size_t,
+ const half,
+ const CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Triangular matrix-vector multiplication: STRMV/DTRMV/CTRMV/ZTRMV/HTRMV
+template <typename T>
+StatusCode Trmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
+ const size_t n,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xtrmv<T>(queue_cpp, nullptr);
+ routine.DoTrmv(layout, triangle, a_transpose, diagonal,
+ n,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Trmv<float>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trmv<double>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trmv<float2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trmv<double2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trmv<half>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Triangular banded matrix-vector multiplication: STBMV/DTBMV/CTBMV/ZTBMV/HTBMV
+template <typename T>
+StatusCode Tbmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
+ const size_t n, const size_t k,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xtbmv<T>(queue_cpp, nullptr);
+ routine.DoTbmv(layout, triangle, a_transpose, diagonal,
+ n, k,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Tbmv<float>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tbmv<double>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tbmv<float2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tbmv<double2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tbmv<half>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Triangular packed matrix-vector multiplication: STPMV/DTPMV/CTPMV/ZTPMV/HTPMV
+template <typename T>
+StatusCode Tpmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
+ const size_t n,
+ const CUdeviceptr ap_buffer, const size_t ap_offset,
+ CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xtpmv<T>(queue_cpp, nullptr);
+ routine.DoTpmv(layout, triangle, a_transpose, diagonal,
+ n,
+ Buffer<T>(ap_buffer), ap_offset,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Tpmv<float>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tpmv<double>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tpmv<float2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tpmv<double2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tpmv<half>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Solves a triangular system of equations: STRSV/DTRSV/CTRSV/ZTRSV
+template <typename T>
+StatusCode Trsv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
+ const size_t n,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xtrsv<T>(queue_cpp, nullptr);
+ routine.DoTrsv(layout, triangle, a_transpose, diagonal,
+ n,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(x_buffer), x_offset, x_inc);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Trsv<float>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trsv<double>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trsv<float2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trsv<double2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
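+
+// Usage sketch (illustrative only, with hypothetical buffer names): Trsv
+// solves op(A) x = b in-place, overwriting the x-vector with the solution.
+// Assuming an n-by-n upper-triangular matrix in a_dev and the right-hand
+// side already stored in x_dev:
+//
+//   const auto status = clblast::Trsv<double>(
+//       clblast::Layout::kColMajor, clblast::Triangle::kUpper,
+//       clblast::Transpose::kNo, clblast::Diagonal::kNonUnit,
+//       n,
+//       a_dev, 0, n,  // A and its leading dimension
+//       x_dev, 0, 1,  // b on input, x on output, unit stride
+//       context, device);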
+
+// Solves a banded triangular system of equations: STBSV/DTBSV/CTBSV/ZTBSV.
+// Not implemented in the CUDA API: the stub below returns kNotImplemented.
+template <typename T>
+StatusCode Tbsv(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice) {
+ return StatusCode::kNotImplemented;
+}
+template StatusCode PUBLIC_API Tbsv<float>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tbsv<double>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tbsv<float2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tbsv<double2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Solves a packed triangular system of equations: STPSV/DTPSV/CTPSV/ZTPSV.
+// Not implemented in the CUDA API: the stub below returns kNotImplemented.
+template <typename T>
+StatusCode Tpsv(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice) {
+ return StatusCode::kNotImplemented;
+}
+template StatusCode PUBLIC_API Tpsv<float>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tpsv<double>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tpsv<float2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Tpsv<double2>(const Layout, const Triangle, const Transpose, const Diagonal,
+ const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// General rank-1 matrix update: SGER/DGER/HGER
+template <typename T>
+StatusCode Ger(const Layout layout,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xger<T>(queue_cpp, nullptr);
+ routine.DoGer(layout,
+ m, n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc,
+ Buffer<T>(a_buffer), a_offset, a_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Ger<float>(const Layout,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Ger<double>(const Layout,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Ger<half>(const Layout,
+ const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// General rank-1 complex matrix update: CGERU/ZGERU
+template <typename T>
+StatusCode Geru(const Layout layout,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xgeru<T>(queue_cpp, nullptr);
+ routine.DoGeru(layout,
+ m, n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc,
+ Buffer<T>(a_buffer), a_offset, a_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Geru<float2>(const Layout,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Geru<double2>(const Layout,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// General rank-1 complex conjugated matrix update: CGERC/ZGERC
+template <typename T>
+StatusCode Gerc(const Layout layout,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xgerc<T>(queue_cpp, nullptr);
+ routine.DoGerc(layout,
+ m, n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc,
+ Buffer<T>(a_buffer), a_offset, a_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Gerc<float2>(const Layout,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gerc<double2>(const Layout,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Hermitian rank-1 matrix update: CHER/ZHER
+template <typename T>
+StatusCode Her(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xher<std::complex<T>,T>(queue_cpp, nullptr);
+ routine.DoHer(layout, triangle,
+ n,
+ alpha,
+ Buffer<std::complex<T>>(x_buffer), x_offset, x_inc,
+ Buffer<std::complex<T>>(a_buffer), a_offset, a_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Her<float>(const Layout, const Triangle,
+ const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Her<double>(const Layout, const Triangle,
+ const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
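+
+// Note on the template parameter of Her (and of Hpr below): T is the real
+// scalar type of alpha, while the matrix and vector elements are complex;
+// the Buffer<std::complex<T>> casts above reinterpret the raw CUdeviceptr
+// accordingly. A hedged call sketch with hypothetical device buffers:
+//
+//   // x_dev and a_dev hold float2 (complex) data; alpha is a plain float
+//   const auto status = clblast::Her<float>(
+//       clblast::Layout::kColMajor, clblast::Triangle::kLower,
+//       n, 0.5f,
+//       x_dev, 0, 1,
+//       a_dev, 0, n,
+//       context, device);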
+
+// Hermitian packed rank-1 matrix update: CHPR/ZHPR
+template <typename T>
+StatusCode Hpr(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ CUdeviceptr ap_buffer, const size_t ap_offset,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xhpr<std::complex<T>,T>(queue_cpp, nullptr);
+ routine.DoHpr(layout, triangle,
+ n,
+ alpha,
+ Buffer<std::complex<T>>(x_buffer), x_offset, x_inc,
+ Buffer<std::complex<T>>(ap_buffer), ap_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Hpr<float>(const Layout, const Triangle,
+ const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Hpr<double>(const Layout, const Triangle,
+ const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
+// Hermitian rank-2 matrix update: CHER2/ZHER2
+template <typename T>
+StatusCode Her2(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xher2<T>(queue_cpp, nullptr);
+ routine.DoHer2(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc,
+ Buffer<T>(a_buffer), a_offset, a_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Her2<float2>(const Layout, const Triangle,
+ const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Her2<double2>(const Layout, const Triangle,
+ const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Hermitian packed rank-2 matrix update: CHPR2/ZHPR2
+template <typename T>
+StatusCode Hpr2(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ CUdeviceptr ap_buffer, const size_t ap_offset,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xhpr2<T>(queue_cpp, nullptr);
+ routine.DoHpr2(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc,
+ Buffer<T>(ap_buffer), ap_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Hpr2<float2>(const Layout, const Triangle,
+ const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Hpr2<double2>(const Layout, const Triangle,
+ const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
+// Symmetric rank-1 matrix update: SSYR/DSYR/HSYR
+template <typename T>
+StatusCode Syr(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xsyr<T>(queue_cpp, nullptr);
+ routine.DoSyr(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(a_buffer), a_offset, a_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Syr<float>(const Layout, const Triangle,
+ const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syr<double>(const Layout, const Triangle,
+ const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syr<half>(const Layout, const Triangle,
+ const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Symmetric packed rank-1 matrix update: SSPR/DSPR/HSPR
+template <typename T>
+StatusCode Spr(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ CUdeviceptr ap_buffer, const size_t ap_offset,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xspr<T>(queue_cpp, nullptr);
+ routine.DoSpr(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(ap_buffer), ap_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Spr<float>(const Layout, const Triangle,
+ const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Spr<double>(const Layout, const Triangle,
+ const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Spr<half>(const Layout, const Triangle,
+ const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
+// Symmetric rank-2 matrix update: SSYR2/DSYR2/HSYR2
+template <typename T>
+StatusCode Syr2(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xsyr2<T>(queue_cpp, nullptr);
+ routine.DoSyr2(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc,
+ Buffer<T>(a_buffer), a_offset, a_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Syr2<float>(const Layout, const Triangle,
+ const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syr2<double>(const Layout, const Triangle,
+ const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syr2<half>(const Layout, const Triangle,
+ const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Symmetric packed rank-2 matrix update: SSPR2/DSPR2/HSPR2
+template <typename T>
+StatusCode Spr2(const Layout layout, const Triangle triangle,
+ const size_t n,
+ const T alpha,
+ const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
+ const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
+ CUdeviceptr ap_buffer, const size_t ap_offset,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xspr2<T>(queue_cpp, nullptr);
+ routine.DoSpr2(layout, triangle,
+ n,
+ alpha,
+ Buffer<T>(x_buffer), x_offset, x_inc,
+ Buffer<T>(y_buffer), y_offset, y_inc,
+ Buffer<T>(ap_buffer), ap_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Spr2<float>(const Layout, const Triangle,
+ const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Spr2<double>(const Layout, const Triangle,
+ const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Spr2<half>(const Layout, const Triangle,
+ const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
+// =================================================================================================
+// BLAS level-3 (matrix-matrix) routines
+// =================================================================================================
+
+// General matrix-matrix multiplication: SGEMM/DGEMM/CGEMM/ZGEMM/HGEMM
+template <typename T>
+StatusCode Gemm(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+ const T beta,
+ CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xgemm<T>(queue_cpp, nullptr);
+ routine.DoGemm(layout, a_transpose, b_transpose,
+ m, n, k,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(b_buffer), b_offset, b_ld,
+ beta,
+ Buffer<T>(c_buffer), c_offset, c_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Gemm<float>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gemm<double>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gemm<float2>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gemm<double2>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Gemm<half>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
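+
+// Usage sketch (illustrative only, not part of the library): calling the CUDA
+// API of Gemm from host code. Assumes cuInit/cuCtxCreate already produced a
+// valid `context` and `device`, and the hypothetical device pointers below
+// were allocated with cuMemAlloc and filled via cuMemcpyHtoD:
+//
+//   const size_t m = 128, n = 64, k = 256;
+//   CUdeviceptr a_dev, b_dev, c_dev;  // m*k, k*n and m*n floats
+//   cuMemAlloc(&a_dev, m * k * sizeof(float));
+//   cuMemAlloc(&b_dev, k * n * sizeof(float));
+//   cuMemAlloc(&c_dev, m * n * sizeof(float));
+//   // ... fill a_dev and b_dev ...
+//   const auto status = clblast::Gemm<float>(
+//       clblast::Layout::kRowMajor, clblast::Transpose::kNo, clblast::Transpose::kNo,
+//       m, n, k,
+//       1.0f,
+//       a_dev, 0, k,  // A is m-by-k: row-major leading dimension k
+//       b_dev, 0, n,  // B is k-by-n: row-major leading dimension n
+//       0.0f,
+//       c_dev, 0, n,  // C is m-by-n: row-major leading dimension n
+//       context, device);
+//   if (status != clblast::StatusCode::kSuccess) { /* handle the error */ }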
+
+// Symmetric matrix-matrix multiplication: SSYMM/DSYMM/CSYMM/ZSYMM/HSYMM
+template <typename T>
+StatusCode Symm(const Layout layout, const Side side, const Triangle triangle,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+ const T beta,
+ CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xsymm<T>(queue_cpp, nullptr);
+ routine.DoSymm(layout, side, triangle,
+ m, n,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(b_buffer), b_offset, b_ld,
+ beta,
+ Buffer<T>(c_buffer), c_offset, c_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Symm<float>(const Layout, const Side, const Triangle,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Symm<double>(const Layout, const Side, const Triangle,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Symm<float2>(const Layout, const Side, const Triangle,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Symm<double2>(const Layout, const Side, const Triangle,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Symm<half>(const Layout, const Side, const Triangle,
+ const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Hermitian matrix-matrix multiplication: CHEMM/ZHEMM
+template <typename T>
+StatusCode Hemm(const Layout layout, const Side side, const Triangle triangle,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+ const T beta,
+ CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xhemm<T>(queue_cpp, nullptr);
+ routine.DoHemm(layout, side, triangle,
+ m, n,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(b_buffer), b_offset, b_ld,
+ beta,
+ Buffer<T>(c_buffer), c_offset, c_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Hemm<float2>(const Layout, const Side, const Triangle,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Hemm<double2>(const Layout, const Side, const Triangle,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Rank-K update of a symmetric matrix: SSYRK/DSYRK/CSYRK/ZSYRK/HSYRK
+template <typename T>
+StatusCode Syrk(const Layout layout, const Triangle triangle, const Transpose a_transpose,
+ const size_t n, const size_t k,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const T beta,
+ CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xsyrk<T>(queue_cpp, nullptr);
+ routine.DoSyrk(layout, triangle, a_transpose,
+ n, k,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ beta,
+ Buffer<T>(c_buffer), c_offset, c_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Syrk<float>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syrk<double>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syrk<float2>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syrk<double2>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syrk<half>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Rank-K update of a Hermitian matrix: CHERK/ZHERK
+template <typename T>
+StatusCode Herk(const Layout layout, const Triangle triangle, const Transpose a_transpose,
+ const size_t n, const size_t k,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const T beta,
+ CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xherk<std::complex<T>,T>(queue_cpp, nullptr);
+ routine.DoHerk(layout, triangle, a_transpose,
+ n, k,
+ alpha,
+ Buffer<std::complex<T>>(a_buffer), a_offset, a_ld,
+ beta,
+ Buffer<std::complex<T>>(c_buffer), c_offset, c_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Herk<float>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Herk<double>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Rank-2K update of a symmetric matrix: SSYR2K/DSYR2K/CSYR2K/ZSYR2K/HSYR2K
+template <typename T>
+StatusCode Syr2k(const Layout layout, const Triangle triangle, const Transpose ab_transpose,
+ const size_t n, const size_t k,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+ const T beta,
+ CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xsyr2k<T>(queue_cpp, nullptr);
+ routine.DoSyr2k(layout, triangle, ab_transpose,
+ n, k,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(b_buffer), b_offset, b_ld,
+ beta,
+ Buffer<T>(c_buffer), c_offset, c_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Syr2k<float>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syr2k<double>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syr2k<float2>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syr2k<double2>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Syr2k<half>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Rank-2K update of a Hermitian matrix: CHER2K/ZHER2K
+template <typename T, typename U>
+StatusCode Her2k(const Layout layout, const Triangle triangle, const Transpose ab_transpose,
+ const size_t n, const size_t k,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+ const U beta,
+ CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xher2k<T,U>(queue_cpp, nullptr);
+ routine.DoHer2k(layout, triangle, ab_transpose,
+ n, k,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(b_buffer), b_offset, b_ld,
+ beta,
+ Buffer<T>(c_buffer), c_offset, c_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Her2k<float2,float>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Her2k<double2,double>(const Layout, const Triangle, const Transpose,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
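+
+// Note: Her2k takes two scalar types, T for the complex alpha and U for the
+// real-valued beta, matching the CHER2K/ZHER2K convention in which beta is
+// real even though the matrices are complex.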
+
+// Triangular matrix-matrix multiplication: STRMM/DTRMM/CTRMM/ZTRMM/HTRMM
+template <typename T>
+StatusCode Trmm(const Layout layout, const Side side, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xtrmm<T>(queue_cpp, nullptr);
+ routine.DoTrmm(layout, side, triangle, a_transpose, diagonal,
+ m, n,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(b_buffer), b_offset, b_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Trmm<float>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trmm<double>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trmm<float2>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trmm<double2>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trmm<half>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// Solves a triangular system of equations: STRSM/DTRSM/CTRSM/ZTRSM
+template <typename T>
+StatusCode Trsm(const Layout layout, const Side side, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xtrsm<T>(queue_cpp, nullptr);
+ routine.DoTrsm(layout, side, triangle, a_transpose, diagonal,
+ m, n,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(b_buffer), b_offset, b_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Trsm<float>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trsm<double>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trsm<float2>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Trsm<double2>(const Layout, const Side, const Triangle, const Transpose, const Diagonal,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+
+// =================================================================================================
+// Extra non-BLAS routines (level-X)
+// =================================================================================================
+
+// Scaling and out-of-place transpose/copy (non-BLAS function): SOMATCOPY/DOMATCOPY/COMATCOPY/ZOMATCOPY/HOMATCOPY
+template <typename T>
+StatusCode Omatcopy(const Layout layout, const Transpose a_transpose,
+ const size_t m, const size_t n,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+ CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xomatcopy<T>(queue_cpp, nullptr);
+ routine.DoOmatcopy(layout, a_transpose,
+ m, n,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld,
+ Buffer<T>(b_buffer), b_offset, b_ld);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Omatcopy<float>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Omatcopy<double>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Omatcopy<float2>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Omatcopy<double2>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Omatcopy<half>(const Layout, const Transpose,
+ const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t,
+ CUdeviceptr, const size_t, const size_t,
+ const CUcontext, const CUdevice);
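+
+// Usage sketch (hypothetical buffers): Omatcopy computes B = alpha * op(A)
+// out of place. With a_transpose set to kYes, an m-by-n input yields an
+// n-by-m output, so b_ld refers to the transposed shape:
+//
+//   const auto status = clblast::Omatcopy<float>(
+//       clblast::Layout::kRowMajor, clblast::Transpose::kYes,
+//       m, n,
+//       1.0f,
+//       a_dev, 0, n,  // A is m-by-n: row-major leading dimension n
+//       b_dev, 0, m,  // B is n-by-m: row-major leading dimension m
+//       context, device);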
+
+// Im2col, rearranging image patches into matrix columns (non-BLAS function): SIM2COL/DIM2COL/CIM2COL/ZIM2COL/HIM2COL
+template <typename T>
+StatusCode Im2col(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w,
+ const CUdeviceptr im_buffer, const size_t im_offset,
+ CUdeviceptr col_buffer, const size_t col_offset,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xim2col<T>(queue_cpp, nullptr);
+ routine.DoIm2col(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
+ Buffer<T>(im_buffer), im_offset,
+ Buffer<T>(col_buffer), col_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Im2col<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Im2col<double>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Im2col<float2>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Im2col<double2>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
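The caller sizes col_buffer according to the usual im2col output geometry; a sketch with hypothetical dimensions, reusing the context and device from the sketch above:

    const size_t channels = 3, height = 32, width = 32;
    const size_t kernel_h = 3, kernel_w = 3, pad_h = 1, pad_w = 1;
    const size_t stride_h = 1, stride_w = 1, dilation_h = 1, dilation_w = 1;
    // Standard im2col output size per spatial dimension
    const size_t col_h = (height + 2*pad_h - (dilation_h*(kernel_h - 1) + 1)) / stride_h + 1;
    const size_t col_w = (width  + 2*pad_w - (dilation_w*(kernel_w - 1) + 1)) / stride_w + 1;
    CUdeviceptr im_buffer, col_buffer;
    cuMemAlloc(&im_buffer, channels * height * width * sizeof(float));
    cuMemAlloc(&col_buffer, channels * kernel_h * kernel_w * col_h * col_w * sizeof(float));
    clblast::Im2col<float>(channels, height, width, kernel_h, kernel_w, pad_h, pad_w,
                           stride_h, stride_w, dilation_h, dilation_w,
                           im_buffer, 0, col_buffer, 0, context, device);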
+// Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED
+template <typename T>
+StatusCode AxpyBatched(const size_t n,
+ const T *alphas,
+ const CUdeviceptr x_buffer, const size_t *x_offsets, const size_t x_inc,
+ CUdeviceptr y_buffer, const size_t *y_offsets, const size_t y_inc,
+ const size_t batch_count,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = XaxpyBatched<T>(queue_cpp, nullptr);
+ auto alphas_cpp = std::vector<T>();
+ auto x_offsets_cpp = std::vector<size_t>();
+ auto y_offsets_cpp = std::vector<size_t>();
+ for (auto batch = size_t{0}; batch < batch_count; ++batch) {
+ alphas_cpp.push_back(alphas[batch]);
+ x_offsets_cpp.push_back(x_offsets[batch]);
+ y_offsets_cpp.push_back(y_offsets[batch]);
+ }
+ routine.DoAxpyBatched(n,
+ alphas_cpp,
+ Buffer<T>(x_buffer), x_offsets_cpp, x_inc,
+ Buffer<T>(y_buffer), y_offsets_cpp, y_inc,
+ batch_count);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API AxpyBatched<float>(const size_t,
+ const float*,
+ const CUdeviceptr, const size_t*, const size_t,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API AxpyBatched<double>(const size_t,
+ const double*,
+ const CUdeviceptr, const size_t*, const size_t,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API AxpyBatched<float2>(const size_t,
+ const float2*,
+ const CUdeviceptr, const size_t*, const size_t,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API AxpyBatched<double2>(const size_t,
+ const double2*,
+ const CUdeviceptr, const size_t*, const size_t,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API AxpyBatched<half>(const size_t,
+ const half*,
+ const CUdeviceptr, const size_t*, const size_t,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+
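Per-batch alphas and offsets arrive as plain host arrays and are copied into std::vector before dispatch, so all batches can live inside one concatenated device buffer. A sketch with two batches packed back-to-back (hypothetical sizes; context and device as above):

    const size_t n = 1024, batch_count = 2;
    const float alphas[] = {1.0f, 2.0f};
    const size_t x_offsets[] = {0, n};   // batch i starts at element i*n
    const size_t y_offsets[] = {0, n};
    CUdeviceptr x_buffer, y_buffer;
    cuMemAlloc(&x_buffer, batch_count * n * sizeof(float));
    cuMemAlloc(&y_buffer, batch_count * n * sizeof(float));
    clblast::AxpyBatched<float>(n, alphas,
                                x_buffer, x_offsets, 1,
                                y_buffer, y_offsets, 1,
                                batch_count, context, device);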
+// Batched version of GEMM: SGEMMBATCHED/DGEMMBATCHED/CGEMMBATCHED/ZGEMMBATCHED/HGEMMBATCHED
+template <typename T>
+StatusCode GemmBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const T *alphas,
+ const CUdeviceptr a_buffer, const size_t *a_offsets, const size_t a_ld,
+ const CUdeviceptr b_buffer, const size_t *b_offsets, const size_t b_ld,
+ const T *betas,
+ CUdeviceptr c_buffer, const size_t *c_offsets, const size_t c_ld,
+ const size_t batch_count,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = XgemmBatched<T>(queue_cpp, nullptr);
+ auto alphas_cpp = std::vector<T>();
+ auto betas_cpp = std::vector<T>();
+ auto a_offsets_cpp = std::vector<size_t>();
+ auto b_offsets_cpp = std::vector<size_t>();
+ auto c_offsets_cpp = std::vector<size_t>();
+ for (auto batch = size_t{0}; batch < batch_count; ++batch) {
+ alphas_cpp.push_back(alphas[batch]);
+ betas_cpp.push_back(betas[batch]);
+ a_offsets_cpp.push_back(a_offsets[batch]);
+ b_offsets_cpp.push_back(b_offsets[batch]);
+ c_offsets_cpp.push_back(c_offsets[batch]);
+ }
+ routine.DoGemmBatched(layout, a_transpose, b_transpose,
+ m, n, k,
+ alphas_cpp,
+ Buffer<T>(a_buffer), a_offsets_cpp, a_ld,
+ Buffer<T>(b_buffer), b_offsets_cpp, b_ld,
+ betas_cpp,
+ Buffer<T>(c_buffer), c_offsets_cpp, c_ld,
+ batch_count);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API GemmBatched<float>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const float*,
+ const CUdeviceptr, const size_t*, const size_t,
+ const CUdeviceptr, const size_t*, const size_t,
+ const float*,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API GemmBatched<double>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const double*,
+ const CUdeviceptr, const size_t*, const size_t,
+ const CUdeviceptr, const size_t*, const size_t,
+ const double*,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API GemmBatched<float2>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const float2*,
+ const CUdeviceptr, const size_t*, const size_t,
+ const CUdeviceptr, const size_t*, const size_t,
+ const float2*,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API GemmBatched<double2>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const double2*,
+ const CUdeviceptr, const size_t*, const size_t,
+ const CUdeviceptr, const size_t*, const size_t,
+ const double2*,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API GemmBatched<half>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const half*,
+ const CUdeviceptr, const size_t*, const size_t,
+ const CUdeviceptr, const size_t*, const size_t,
+ const half*,
+ CUdeviceptr, const size_t*, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+
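Offsets play the role of the per-matrix pointer arrays found in other batched-GEMM interfaces; a strided batch is simply offsets at regular intervals. A sketch with hypothetical square matrices, where a_buffer, b_buffer and c_buffer are assumed to have been allocated as above with room for batch_count matrices each:

    const size_t m = 64, n = 64, k = 64, batch_count = 4;
    std::vector<float> alphas(batch_count, 1.0f), betas(batch_count, 0.0f);
    std::vector<size_t> a_offsets, b_offsets, c_offsets;
    for (size_t i = 0; i < batch_count; ++i) {
      a_offsets.push_back(i * m * k);   // one matrix directly after another
      b_offsets.push_back(i * k * n);
      c_offsets.push_back(i * m * n);
    }
    clblast::GemmBatched<float>(clblast::Layout::kRowMajor,
                                clblast::Transpose::kNo, clblast::Transpose::kNo,
                                m, n, k, alphas.data(),
                                a_buffer, a_offsets.data(), k,
                                b_buffer, b_offsets.data(), n,
                                betas.data(),
                                c_buffer, c_offsets.data(), n,
                                batch_count, context, device);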
+// =================================================================================================
+} // namespace clblast
diff --git a/src/clpp11.hpp b/src/clpp11.hpp
index 9e3a6b7f..0cdd92e7 100644
--- a/src/clpp11.hpp
+++ b/src/clpp11.hpp
@@ -60,34 +60,36 @@ namespace clblast {
// =================================================================================================
// Represents a runtime error returned by an OpenCL API function
-class CLError : public ErrorCode<DeviceError, cl_int> {
+class CLCudaAPIError : public ErrorCode<DeviceError, cl_int> {
public:
- explicit CLError(cl_int status, const std::string &where):
- ErrorCode(status,
- where,
- "OpenCL error: " + where + ": " + std::to_string(static_cast<int>(status))) {
+ explicit CLCudaAPIError(cl_int status, const std::string &where):
+ ErrorCode(status, where, "OpenCL error: " + where + ": " +
+ std::to_string(static_cast<int>(status))) {
}
static void Check(const cl_int status, const std::string &where) {
if (status != CL_SUCCESS) {
- throw CLError(status, where);
+ throw CLCudaAPIError(status, where);
}
}
static void CheckDtor(const cl_int status, const std::string &where) {
if (status != CL_SUCCESS) {
- fprintf(stderr, "CLBlast: %s (ignoring)\n", CLError(status, where).what());
+ fprintf(stderr, "CLBlast: %s (ignoring)\n", CLCudaAPIError(status, where).what());
}
}
};
+// Exception returned when building a program
+using CLCudaAPIBuildError = CLCudaAPIError;
+
// =================================================================================================
// Error occurred in OpenCL
-#define CheckError(call) CLError::Check(call, CLError::TrimCallString(#call))
+#define CheckError(call) CLCudaAPIError::Check(call, CLCudaAPIError::TrimCallString(#call))
-// Error occured in OpenCL (no-exception version for destructors)
-#define CheckErrorDtor(call) CLError::CheckDtor(call, CLError::TrimCallString(#call))
+// Error occurred in OpenCL (no-exception version for destructors)
+#define CheckErrorDtor(call) CLCudaAPIError::CheckDtor(call, CLCudaAPIError::TrimCallString(#call))
// =================================================================================================
@@ -143,6 +145,9 @@ using EventPointer = cl_event*;
// =================================================================================================
+// Raw platform ID type
+using RawPlatformID = cl_platform_id;
+
// C++11 version of 'cl_platform_id'
class Platform {
public:
@@ -178,7 +183,7 @@ class Platform {
}
// Accessor to the private data-member
- const cl_platform_id& operator()() const { return platform_; }
+ const RawPlatformID& operator()() const { return platform_; }
private:
cl_platform_id platform_;
@@ -207,6 +212,9 @@ inline std::vector<Platform> GetAllPlatforms() {
// =================================================================================================
+// Raw device ID type
+using RawDeviceID = cl_device_id;
+
// C++11 version of 'cl_device_id'
class Device {
public:
@@ -231,7 +239,7 @@ class Device {
}
// Methods to retrieve device information
- cl_platform_id Platform() const { return GetInfo<cl_platform_id>(CL_DEVICE_PLATFORM); }
+ RawPlatformID PlatformID() const { return GetInfo<cl_platform_id>(CL_DEVICE_PLATFORM); }
std::string Version() const { return GetInfoString(CL_DEVICE_VERSION); }
size_t VersionNumber() const
{
@@ -263,11 +271,19 @@ class Device {
unsigned long LocalMemSize() const {
return static_cast<unsigned long>(GetInfo<cl_ulong>(CL_DEVICE_LOCAL_MEM_SIZE));
}
+
std::string Capabilities() const { return GetInfoString(CL_DEVICE_EXTENSIONS); }
bool HasExtension(const std::string &extension) const {
const auto extensions = Capabilities();
return extensions.find(extension) != std::string::npos;
}
+ bool SupportsFP64() const {
+ return HasExtension("cl_khr_fp64");
+ }
+ bool SupportsFP16() const {
+ if (Name() == "Mali-T628") { return true; } // supports fp16 but not cl_khr_fp16 officially
+ return HasExtension("cl_khr_fp16");
+ }
size_t CoreClock() const {
return static_cast<size_t>(GetInfo<cl_uint>(CL_DEVICE_MAX_CLOCK_FREQUENCY));
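SupportsFP64/SupportsFP16 centralize precision checks that callers previously did via raw extension strings. A hypothetical guard in a routine set-up path, assuming the Precision enum and status codes from the public clblast.h API:

    // Hypothetical: reject unsupported precisions up front
    if (precision == Precision::kHalf && !device.SupportsFP16()) {
      return StatusCode::kNoHalfPrecision;
    }
    if (precision == Precision::kDouble && !device.SupportsFP64()) {
      return StatusCode::kNoDoublePrecision;
    }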
@@ -331,9 +347,8 @@ class Device {
std::string{"."} + std::to_string(GetInfo<cl_uint>(CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV));
}
-
// Accessor to the private data-member
- const cl_device_id& operator()() const { return device_; }
+ const RawDeviceID& operator()() const { return device_; }
private:
cl_device_id device_;
@@ -367,6 +382,9 @@ class Device {
// =================================================================================================
+// Raw context type
+using RawContext = cl_context;
+
// C++11 version of 'cl_context'
class Context {
public:
@@ -386,12 +404,12 @@ class Context {
auto status = CL_SUCCESS;
const cl_device_id dev = device();
*context_ = clCreateContext(nullptr, 1, &dev, nullptr, nullptr, &status);
- CLError::Check(status, "clCreateContext");
+ CLCudaAPIError::Check(status, "clCreateContext");
}
// Accessor to the private data-member
- const cl_context& operator()() const { return *context_; }
- cl_context* pointer() const { return &(*context_); }
+ const RawContext& operator()() const { return *context_; }
+ RawContext* pointer() const { return &(*context_); }
private:
std::shared_ptr<cl_context> context_;
};
@@ -401,9 +419,6 @@ using ContextPointer = cl_context*;
// =================================================================================================
-// Enumeration of build statuses of the run-time compilation process
-enum class BuildStatus { kSuccess, kError, kInvalid };
-
// C++11 version of 'cl_program'.
class Program {
public:
@@ -416,10 +431,10 @@ class Program {
delete p;
}) {
const char *source_ptr = &source[0];
- size_t length = source.length();
+ const auto length = source.length();
auto status = CL_SUCCESS;
*program_ = clCreateProgramWithSource(context(), 1, &source_ptr, &length, &status);
- CLError::Check(status, "clCreateProgramWithSource");
+ CLCudaAPIError::Check(status, "clCreateProgramWithSource");
}
// Binary-based constructor with memory management
@@ -429,18 +444,18 @@ class Program {
delete p;
}) {
const char *binary_ptr = &binary[0];
- size_t length = binary.length();
+ const auto length = binary.length();
auto status1 = CL_SUCCESS;
auto status2 = CL_SUCCESS;
- const cl_device_id dev = device();
+ const auto dev = device();
*program_ = clCreateProgramWithBinary(context(), 1, &dev, &length,
reinterpret_cast<const unsigned char**>(&binary_ptr),
&status1, &status2);
- CLError::Check(status1, "clCreateProgramWithBinary (binary status)");
- CLError::Check(status2, "clCreateProgramWithBinary");
+ CLCudaAPIError::Check(status1, "clCreateProgramWithBinary (binary status)");
+ CLCudaAPIError::Check(status2, "clCreateProgramWithBinary");
}
- // Compiles the device program and returns whether or not there where any warnings/errors
+ // Compiles the device program and checks whether or not there are any warnings/errors
void Build(const Device &device, std::vector<std::string> &options) {
options.push_back("-cl-std=CL1.1");
auto options_string = std::accumulate(options.begin(), options.end(), std::string{" "});
@@ -448,6 +463,11 @@ class Program {
CheckError(clBuildProgram(*program_, 1, &dev, options_string.c_str(), nullptr, nullptr));
}
+ // Confirms whether a certain status code is an actual compilation error or warning
+ bool StatusIsCompilationWarningOrError(const cl_int status) const {
+ return (status == CL_BUILD_PROGRAM_FAILURE);
+ }
+
// Retrieves the warning/error message from the compiler (if any)
std::string GetBuildInfo(const Device &device) const {
auto bytes = size_t{0};
@@ -478,6 +498,9 @@ class Program {
// =================================================================================================
+// Raw command-queue type
+using RawCommandQueue = cl_command_queue;
+
// C++11 version of 'cl_command_queue'
class Queue {
public:
@@ -496,7 +519,7 @@ class Queue {
}) {
auto status = CL_SUCCESS;
*queue_ = clCreateCommandQueue(context(), device(), CL_QUEUE_PROFILING_ENABLE, &status);
- CLError::Check(status, "clCreateCommandQueue");
+ CLCudaAPIError::Check(status, "clCreateCommandQueue");
}
// Synchronizes the queue
@@ -524,7 +547,7 @@ class Queue {
}
// Accessor to the private data-member
- const cl_command_queue& operator()() const { return *queue_; }
+ const RawCommandQueue& operator()() const { return *queue_; }
private:
std::shared_ptr<cl_command_queue> queue_;
};
@@ -588,7 +611,7 @@ class Buffer {
if (access_ == BufferAccess::kWriteOnly) { flags = CL_MEM_WRITE_ONLY; }
auto status = CL_SUCCESS;
*buffer_ = clCreateBuffer(context(), flags, size*sizeof(T), nullptr, &status);
- CLError::Check(status, "clCreateBuffer");
+ CLCudaAPIError::Check(status, "clCreateBuffer");
}
// As above, but now with read/write access as a default
@@ -646,6 +669,9 @@ class Buffer {
// Copies from host to device: writing the device buffer a-synchronously
void WriteAsync(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) {
+ if (access_ == BufferAccess::kReadOnly) {
+ throw LogicError("Buffer: writing to a read-only buffer");
+ }
if (GetSize() < (offset+size)*sizeof(T)) {
throw LogicError("Buffer: target device buffer is too small");
}
@@ -720,7 +746,7 @@ class Kernel {
}) {
auto status = CL_SUCCESS;
*kernel_ = clCreateKernel(program(), name.c_str(), &status);
- CLError::Check(status, "clCreateKernel");
+ CLCudaAPIError::Check(status, "clCreateKernel");
}
// Sets a kernel argument at the indicated position
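The rename from CLError to CLCudaAPIError, together with the CLCudaAPIBuildError alias, gives back-end-independent code a single spelling for error handling across OpenCL and CUDA. A sketch of the intended catch-site, assuming ErrorCode exposes the wrapped status via status():

    try {
      program.Build(device, options);
    } catch (const CLCudaAPIBuildError &e) {
      if (program.StatusIsCompilationWarningOrError(e.status())) {
        fprintf(stderr, "Compiler output:\n%s\n", program.GetBuildInfo(device).c_str());
      }
      throw;  // re-throw after printing the compiler log
    }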
diff --git a/src/cupp11.hpp b/src/cupp11.hpp
new file mode 100644
index 00000000..ec21c5b1
--- /dev/null
+++ b/src/cupp11.hpp
@@ -0,0 +1,782 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements a bunch of C++11 classes that act as wrappers around OpenCL objects and API
+// calls. The main benefits are increased abstraction, automatic memory management, and portability.
+// Portability here means that a similar header exists for CUDA with the same classes and
+// interfaces. In other words, moving from the OpenCL API to the CUDA API becomes a one-line change.
+//
+// This file is taken from the CLCudaAPI project <https://github.com/CNugteren/CLCudaAPI> and
+// therefore contains the following header copyright notice:
+//
+// =================================================================================================
+//
+// Copyright 2015 SURFsara
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// =================================================================================================
+
+#ifndef CLBLAST_CUPP11_H_
+#define CLBLAST_CUPP11_H_
+
+// C++
+#include <algorithm> // std::copy
+#include <string> // std::string
+#include <vector> // std::vector
+#include <memory> // std::shared_ptr
+#include <cstring> // std::strlen
+
+// CUDA
+#define CUDA_NO_HALF // Incompatible with CLBlast's definition; TODO: resolve this
+#include <cuda.h> // CUDA driver API
+#include <nvrtc.h> // NVIDIA runtime compilation API
+
+// Exception classes
+#include "cxpp11_common.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Max-length of strings
+constexpr auto kStringLength = 256;
+
+// =================================================================================================
+
+// Represents a runtime error returned by a CUDA driver API function
+class CLCudaAPIError : public ErrorCode<DeviceError, CUresult> {
+public:
+ explicit CLCudaAPIError(CUresult status, const std::string &where):
+ ErrorCode(status, where, "CUDA error: " + where + ": " +
+ GetErrorName(status) + " --> " + GetErrorString(status)) {
+ }
+
+ static void Check(const CUresult status, const std::string &where) {
+ if (status != CUDA_SUCCESS) {
+ throw CLCudaAPIError(status, where);
+ }
+ }
+
+ static void CheckDtor(const CUresult status, const std::string &where) {
+ if (status != CUDA_SUCCESS) {
+ fprintf(stderr, "CLCudaAPI: %s (ignoring)\n", CLCudaAPIError(status, where).what());
+ }
+ }
+
+private:
+ std::string GetErrorName(CUresult status) const {
+ const char* status_code;
+ cuGetErrorName(status, &status_code);
+ return std::string(status_code);
+ }
+ std::string GetErrorString(CUresult status) const {
+ const char* status_string;
+ cuGetErrorString(status, &status_string);
+ return std::string(status_string);
+ }
+};
+
+// Represents a runtime error returned by a CUDA runtime compilation API function
+class CLCudaAPINVRTCError : public ErrorCode<DeviceError, nvrtcResult> {
+public:
+ explicit CLCudaAPINVRTCError(nvrtcResult status, const std::string &where):
+ ErrorCode(status, where, "CUDA NVRTC error: " + where + ": " + GetErrorString(status)) {
+ }
+
+ static void Check(const nvrtcResult status, const std::string &where) {
+ if (status != NVRTC_SUCCESS) {
+ throw CLCudaAPINVRTCError(status, where);
+ }
+ }
+
+ static void CheckDtor(const nvrtcResult status, const std::string &where) {
+ if (status != NVRTC_SUCCESS) {
+ fprintf(stderr, "CLCudaAPI: %s (ignoring)\n", CLCudaAPINVRTCError(status, where).what());
+ }
+ }
+
+private:
+ std::string GetErrorString(nvrtcResult status) const {
+ const char* status_string = nvrtcGetErrorString(status);
+ return std::string(status_string);
+ }
+};
+
+// Exception returned when building a program
+using CLCudaAPIBuildError = CLCudaAPINVRTCError;
+
+// =================================================================================================
+
+// Error occurred in CUDA driver or runtime compilation API
+#define CheckError(call) CLCudaAPIError::Check(call, CLCudaAPIError::TrimCallString(#call))
+#define CheckErrorNVRTC(call) CLCudaAPINVRTCError::Check(call, CLCudaAPINVRTCError::TrimCallString(#call))
+
+// Error occurred in CUDA driver or runtime compilation API (no-exception version for destructors)
+#define CheckErrorDtor(call) CLCudaAPIError::CheckDtor(call, CLCudaAPIError::TrimCallString(#call))
+#define CheckErrorDtorNVRTC(call) CLCudaAPINVRTCError::CheckDtor(call, CLCudaAPINVRTCError::TrimCallString(#call))
+
+// =================================================================================================
+
+// C++11 version of two 'CUevent' pointers
+class Event {
+public:
+ // Note that there is no constructor based on the regular CUDA data-type because of extra state
+
+ // Regular constructor with memory management
+ explicit Event():
+ start_(new CUevent, [](CUevent* e) { CheckErrorDtor(cuEventDestroy(*e)); delete e; }),
+ end_(new CUevent, [](CUevent* e) { CheckErrorDtor(cuEventDestroy(*e)); delete e; }) {
+ CheckError(cuEventCreate(start_.get(), CU_EVENT_DEFAULT));
+ CheckError(cuEventCreate(end_.get(), CU_EVENT_DEFAULT));
+ }
+
+ // Waits for completion of this event (not implemented for CUDA)
+ void WaitForCompletion() const { } // not needed due to cuStreamSynchronize call after each kernel launch
+
+ // Retrieves the elapsed time of the last recorded event
+ float GetElapsedTime() const {
+ auto result = 0.0f;
+ cuEventElapsedTime(&result, *start_, *end_);
+ return result;
+ }
+
+ // Accessors to the private data-members
+ const CUevent& start() const { return *start_; }
+ const CUevent& end() const { return *end_; }
+ Event* pointer() { return this; }
+private:
+ std::shared_ptr<CUevent> start_;
+ std::shared_ptr<CUevent> end_;
+};
+
+// Pointer to a CUDA event
+using EventPointer = Event*;
+
+// =================================================================================================
+
+// Raw platform ID type
+using RawPlatformID = size_t;
+
+// The CUDA platform: initializes the CUDA driver API
+class Platform {
+public:
+
+ // Initializes the platform. Note that the platform ID variable is not actually used for CUDA.
+ explicit Platform(const size_t platform_id) : platform_id_(0) {
+ if (platform_id != 0) { throw LogicError("CUDA back-end requires a platform ID of 0"); }
+ CheckError(cuInit(0));
+ }
+
+ // Methods to retrieve platform information
+ std::string Name() const { return "CUDA"; }
+ std::string Vendor() const { return "NVIDIA Corporation"; }
+ std::string Version() const {
+ auto result = 0;
+ CheckError(cuDriverGetVersion(&result));
+ return "CUDA driver "+std::to_string(result);
+ }
+
+ // Returns the number of devices on this platform
+ size_t NumDevices() const {
+ auto result = 0;
+ CheckError(cuDeviceGetCount(&result));
+ return static_cast<size_t>(result);
+ }
+
+  // Accessor to the raw ID (which doesn't exist in the CUDA back-end; this is always just 0)
+ const RawPlatformID& operator()() const { return platform_id_; }
+private:
+ const size_t platform_id_;
+};
+
+// Retrieves a vector with all platforms. Note that there is just one platform in CUDA.
+inline std::vector<Platform> GetAllPlatforms() {
+ auto all_platforms = std::vector<Platform>{ Platform(size_t{0}) };
+ return all_platforms;
+}
+
+// =================================================================================================
+
+// Raw device ID type
+using RawDeviceID = CUdevice;
+
+// C++11 version of 'CUdevice'
+class Device {
+public:
+
+ // Constructor based on the regular CUDA data-type
+ explicit Device(const CUdevice device): device_(device) { }
+
+ // Initialization
+ explicit Device(const Platform &platform, const size_t device_id) {
+ auto num_devices = platform.NumDevices();
+ if (num_devices == 0) {
+ throw RuntimeError("Device: no devices found");
+ }
+ if (device_id >= num_devices) {
+ throw RuntimeError("Device: invalid device ID "+std::to_string(device_id));
+ }
+
+ CheckError(cuDeviceGet(&device_, device_id));
+ }
+
+ // Methods to retrieve device information
+ RawPlatformID PlatformID() const { return 0; }
+ std::string Version() const {
+ auto result = 0;
+ CheckError(cuDriverGetVersion(&result));
+ return "CUDA driver "+std::to_string(result);
+ }
+ size_t VersionNumber() const {
+ auto result = 0;
+ CheckError(cuDriverGetVersion(&result));
+ return static_cast<size_t>(result);
+ }
+ std::string Vendor() const { return "NVIDIA Corporation"; }
+ std::string Name() const {
+ auto result = std::string{};
+ result.resize(kStringLength);
+ CheckError(cuDeviceGetName(&result[0], result.size(), device_));
+ result.resize(strlen(result.c_str())); // Removes any trailing '\0'-characters
+ return result;
+ }
+ std::string Type() const { return "GPU"; }
+  size_t MaxWorkGroupSize() const { return GetInfo(CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK); }
+ size_t MaxWorkItemDimensions() const { return size_t{3}; }
+ std::vector<size_t> MaxWorkItemSizes() const {
+ return std::vector<size_t>{GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X),
+ GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y),
+ GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z)};
+ }
+ unsigned long LocalMemSize() const {
+ return static_cast<unsigned long>(GetInfo(CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK));
+ }
+
+ std::string Capabilities() const {
+ const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR);
+ const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR);
+ return "SM"+std::to_string(major)+"."+std::to_string(minor);
+ }
+ std::string ComputeArch() const {
+ const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR);
+ const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR);
+ return "compute_"+std::to_string(major)+std::to_string(minor);
+ }
+ bool HasExtension(const std::string &extension) const { return false; }
+ bool SupportsFP64() const { return true; }
+ bool SupportsFP16() const {
+ const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR);
+ const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR);
+ if (major > 5) { return true; } // SM 6.x, 7.x and higher
+ if (major == 5 && minor == 3) { return true; } // SM 5.3
+ return false;
+ }
+
+ size_t CoreClock() const { return 1e-3*GetInfo(CU_DEVICE_ATTRIBUTE_CLOCK_RATE); }
+ size_t ComputeUnits() const { return GetInfo(CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT); }
+ unsigned long MemorySize() const {
+ auto result = size_t{0};
+ CheckError(cuDeviceTotalMem(&result, device_));
+ return static_cast<unsigned long>(result);
+ }
+ unsigned long MaxAllocSize() const { return MemorySize(); }
+ size_t MemoryClock() const { return 1e-3*GetInfo(CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE); }
+ size_t MemoryBusWidth() const { return GetInfo(CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH); }
+
+ // Configuration-validity checks
+ bool IsLocalMemoryValid(const size_t local_mem_usage) const {
+ return (local_mem_usage <= LocalMemSize());
+ }
+ bool IsThreadConfigValid(const std::vector<size_t> &local) const {
+ auto local_size = size_t{1};
+ for (const auto &item: local) { local_size *= item; }
+ for (auto i=size_t{0}; i<local.size(); ++i) {
+ if (local[i] > MaxWorkItemSizes()[i]) { return false; }
+ }
+ if (local_size > MaxWorkGroupSize()) { return false; }
+ if (local.size() > MaxWorkItemDimensions()) { return false; }
+ return true;
+ }
+
+ // Query for a specific type of device or brand
+ bool IsCPU() const { return false; }
+ bool IsGPU() const { return true; }
+ bool IsAMD() const { return false; }
+ bool IsNVIDIA() const { return true; }
+ bool IsIntel() const { return false; }
+ bool IsARM() const { return false; }
+
+ // Platform specific extensions
+ std::string AMDBoardName() const { return ""; }
+ std::string NVIDIAComputeCapability() const { return Capabilities(); }
+
+ // Accessor to the private data-member
+ const RawDeviceID& operator()() const { return device_; }
+private:
+ CUdevice device_;
+
+ // Private helper function
+ size_t GetInfo(const CUdevice_attribute info) const {
+ auto result = 0;
+ CheckError(cuDeviceGetAttribute(&result, info, device_));
+ return static_cast<size_t>(result);
+ }
+};
+
+// =================================================================================================
+
+// Raw context type
+using RawContext = CUcontext;
+
+// C++11 version of 'CUcontext'
+class Context {
+public:
+
+ // Constructor based on the regular CUDA data-type: memory management is handled elsewhere
+ explicit Context(const CUcontext context):
+ context_(new CUcontext) {
+ *context_ = context;
+ }
+
+ // Regular constructor with memory management
+ explicit Context(const Device &device):
+ context_(new CUcontext, [](CUcontext* c) {
+ if (*c) { CheckErrorDtor(cuCtxDestroy(*c)); }
+ delete c;
+ }) {
+ CheckError(cuCtxCreate(context_.get(), 0, device()));
+ }
+
+ // Accessor to the private data-member
+ const RawContext& operator()() const { return *context_; }
+ RawContext* pointer() const { return &(*context_); }
+private:
+ std::shared_ptr<CUcontext> context_;
+};
+
+// Pointer to a raw CUDA context
+using ContextPointer = CUcontext*;
+
+// =================================================================================================
+
+// C++11 version of 'nvrtcProgram'. Additionally holds the program's source code.
+class Program {
+public:
+ Program() = default;
+
+ // Note that there is no constructor based on the regular CUDA data-type because of extra state
+
+ // Source-based constructor with memory management
+ explicit Program(const Context &, std::string source):
+ program_(new nvrtcProgram, [](nvrtcProgram* p) {
+ if (*p) { CheckErrorDtorNVRTC(nvrtcDestroyProgram(p)); }
+ delete p;
+ }),
+ source_(std::move(source)),
+ from_binary_(false) {
+ const auto source_ptr = &source_[0];
+ CheckErrorNVRTC(nvrtcCreateProgram(program_.get(), source_ptr, nullptr, 0, nullptr, nullptr));
+ }
+
+ // PTX-based constructor
+ explicit Program(const Device &device, const Context &context, const std::string &binary):
+ program_(nullptr), // not used
+ source_(binary),
+ from_binary_(true) {
+ }
+
+ // Compiles the device program and checks whether or not there are any warnings/errors
+ void Build(const Device &device, std::vector<std::string> &options) {
+ options.push_back("-arch=" + device.ComputeArch());
+ if (from_binary_) { return; }
+ auto raw_options = std::vector<const char*>();
+ for (const auto &option: options) {
+ raw_options.push_back(option.c_str());
+ }
+ auto status = nvrtcCompileProgram(*program_, raw_options.size(), raw_options.data());
+ CLCudaAPINVRTCError::Check(status, "nvrtcCompileProgram");
+ CheckError(cuModuleLoadDataEx(&module_, GetIR().data(), 0, nullptr, nullptr));
+ }
+
+ // Confirms whether a certain status code is an actual compilation error or warning
+ bool StatusIsCompilationWarningOrError(const nvrtcResult status) const {
+ return (status == NVRTC_ERROR_COMPILATION);
+ }
+
+ // Retrieves the warning/error message from the compiler (if any)
+ std::string GetBuildInfo(const Device &) const {
+ if (from_binary_) { return std::string{}; }
+ auto bytes = size_t{0};
+ CheckErrorNVRTC(nvrtcGetProgramLogSize(*program_, &bytes));
+ auto result = std::string{};
+ result.resize(bytes);
+ CheckErrorNVRTC(nvrtcGetProgramLog(*program_, &result[0]));
+ return result;
+ }
+
+ // Retrieves an intermediate representation of the compiled program (i.e. PTX)
+ std::string GetIR() const {
+ if (from_binary_) { return source_; } // holds the PTX
+ auto bytes = size_t{0};
+ CheckErrorNVRTC(nvrtcGetPTXSize(*program_, &bytes));
+ auto result = std::string{};
+ result.resize(bytes);
+ CheckErrorNVRTC(nvrtcGetPTX(*program_, &result[0]));
+ return result;
+ }
+
+ // Accessor to the private data-members
+ const CUmodule GetModule() const { return module_; }
+ const nvrtcProgram& operator()() const { return *program_; }
+private:
+ std::shared_ptr<nvrtcProgram> program_;
+ CUmodule module_;
+ std::string source_;
+ bool from_binary_;
+};
+
+// =================================================================================================
+
+// Raw command-queue type
+using RawCommandQueue = CUstream;
+
+// C++11 version of 'CUstream'
+class Queue {
+public:
+ // Note that there is no constructor based on the regular CUDA data-type because of extra state
+
+ // Regular constructor with memory management
+ explicit Queue(const Context &context, const Device &device):
+ queue_(new CUstream, [](CUstream* s) {
+ if (*s) { CheckErrorDtor(cuStreamDestroy(*s)); }
+ delete s;
+ }),
+ context_(context),
+ device_(device) {
+ CheckError(cuStreamCreate(queue_.get(), CU_STREAM_NON_BLOCKING));
+ }
+
+ // Synchronizes the queue and optionally also an event
+ void Finish(Event &event) const {
+ CheckError(cuEventSynchronize(event.end()));
+ Finish();
+ }
+ void Finish() const {
+ CheckError(cuStreamSynchronize(*queue_));
+ }
+
+ // Retrieves the corresponding context or device
+ Context GetContext() const { return context_; }
+ Device GetDevice() const { return device_; }
+
+ // Accessor to the private data-member
+ const RawCommandQueue& operator()() const { return *queue_; }
+private:
+ std::shared_ptr<CUstream> queue_;
+ const Context context_;
+ const Device device_;
+};
+
+// =================================================================================================
+
+// C++11 version of page-locked host memory
+template <typename T>
+class BufferHost {
+public:
+
+ // Regular constructor with memory management
+ explicit BufferHost(const Context &, const size_t size):
+ buffer_(new void*, [](void** m) { CheckError(cuMemFreeHost(*m)); delete m; }),
+ size_(size) {
+ CheckError(cuMemAllocHost(buffer_.get(), size*sizeof(T)));
+ }
+
+ // Retrieves the actual allocated size in bytes
+ size_t GetSize() const {
+ return size_*sizeof(T);
+ }
+
+ // Compatibility with std::vector
+ size_t size() const { return size_; }
+ T* begin() { return &static_cast<T*>(*buffer_)[0]; }
+  T* end() { return &static_cast<T*>(*buffer_)[size_]; }  // one-past-the-last element, as std containers expect
+ T& operator[](const size_t i) { return static_cast<T*>(*buffer_)[i]; }
+ T* data() { return static_cast<T*>(*buffer_); }
+ const T* data() const { return static_cast<T*>(*buffer_); }
+
+private:
+ std::shared_ptr<void*> buffer_;
+ const size_t size_;
+};
+
+// =================================================================================================
+
+// Enumeration of buffer access types
+enum class BufferAccess { kReadOnly, kWriteOnly, kReadWrite, kNotOwned };
+
+// C++11 version of 'CUdeviceptr'
+template <typename T>
+class Buffer {
+public:
+
+ // Constructor based on the regular CUDA data-type: memory management is handled elsewhere
+ explicit Buffer(const CUdeviceptr buffer):
+ buffer_(new CUdeviceptr),
+ access_(BufferAccess::kNotOwned) {
+ *buffer_ = buffer;
+ }
+
+ // Regular constructor with memory management. If this class does not own the buffer object, then
+ // the memory will not be freed automatically afterwards.
+ explicit Buffer(const Context &, const BufferAccess access, const size_t size):
+ buffer_(new CUdeviceptr, [access](CUdeviceptr* m) {
+ if (access != BufferAccess::kNotOwned) { CheckError(cuMemFree(*m)); }
+ delete m;
+ }),
+ access_(access) {
+ CheckError(cuMemAlloc(buffer_.get(), size*sizeof(T)));
+ }
+
+ // As above, but now with read/write access as a default
+ explicit Buffer(const Context &context, const size_t size):
+ Buffer<T>(context, BufferAccess::kReadWrite, size) {
+ }
+
+ // Constructs a new buffer based on an existing host-container
+ template <typename Iterator>
+ explicit Buffer(const Context &context, const Queue &queue, Iterator start, Iterator end):
+ Buffer(context, BufferAccess::kReadWrite, static_cast<size_t>(end - start)) {
+ auto size = static_cast<size_t>(end - start);
+ auto pointer = &*start;
+ CheckError(cuMemcpyHtoDAsync(*buffer_, pointer, size*sizeof(T), queue()));
+ queue.Finish();
+ }
+
+  // Copies from device to host: reading the device buffer asynchronously
+ void ReadAsync(const Queue &queue, const size_t size, T* host, const size_t offset = 0) const {
+ if (access_ == BufferAccess::kWriteOnly) {
+ throw LogicError("Buffer: reading from a write-only buffer");
+ }
+ CheckError(cuMemcpyDtoHAsync(host, *buffer_ + offset*sizeof(T), size*sizeof(T), queue()));
+ }
+ void ReadAsync(const Queue &queue, const size_t size, std::vector<T> &host,
+ const size_t offset = 0) const {
+ if (host.size() < size) {
+ throw LogicError("Buffer: target host buffer is too small");
+ }
+ ReadAsync(queue, size, host.data(), offset);
+ }
+ void ReadAsync(const Queue &queue, const size_t size, BufferHost<T> &host,
+ const size_t offset = 0) const {
+ if (host.size() < size) {
+ throw LogicError("Buffer: target host buffer is too small");
+ }
+ ReadAsync(queue, size, host.data(), offset);
+ }
+
+ // Copies from device to host: reading the device buffer
+ void Read(const Queue &queue, const size_t size, T* host, const size_t offset = 0) const {
+ ReadAsync(queue, size, host, offset);
+ queue.Finish();
+ }
+ void Read(const Queue &queue, const size_t size, std::vector<T> &host,
+ const size_t offset = 0) const {
+ Read(queue, size, host.data(), offset);
+ }
+ void Read(const Queue &queue, const size_t size, BufferHost<T> &host,
+ const size_t offset = 0) const {
+ Read(queue, size, host.data(), offset);
+ }
+
+  // Copies from host to device: writing the device buffer asynchronously
+ void WriteAsync(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) {
+ if (access_ == BufferAccess::kReadOnly) {
+ throw LogicError("Buffer: writing to a read-only buffer");
+ }
+ if (GetSize() < (offset+size)*sizeof(T)) {
+ throw LogicError("Buffer: target device buffer is too small");
+ }
+ CheckError(cuMemcpyHtoDAsync(*buffer_ + offset*sizeof(T), host, size*sizeof(T), queue()));
+ }
+ void WriteAsync(const Queue &queue, const size_t size, const std::vector<T> &host,
+ const size_t offset = 0) {
+ WriteAsync(queue, size, host.data(), offset);
+ }
+ void WriteAsync(const Queue &queue, const size_t size, const BufferHost<T> &host,
+ const size_t offset = 0) {
+ WriteAsync(queue, size, host.data(), offset);
+ }
+
+ // Copies from host to device: writing the device buffer
+ void Write(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) {
+ WriteAsync(queue, size, host, offset);
+ queue.Finish();
+ }
+ void Write(const Queue &queue, const size_t size, const std::vector<T> &host,
+ const size_t offset = 0) {
+ Write(queue, size, host.data(), offset);
+ }
+ void Write(const Queue &queue, const size_t size, const BufferHost<T> &host,
+ const size_t offset = 0) {
+ Write(queue, size, host.data(), offset);
+ }
+
+ // Copies the contents of this buffer into another device buffer
+ void CopyToAsync(const Queue &queue, const size_t size, const Buffer<T> &destination) const {
+ CheckError(cuMemcpyDtoDAsync(destination(), *buffer_, size*sizeof(T), queue()));
+ }
+ void CopyTo(const Queue &queue, const size_t size, const Buffer<T> &destination) const {
+ CopyToAsync(queue, size, destination);
+ queue.Finish();
+ }
+
+ // Retrieves the actual allocated size in bytes
+ size_t GetSize() const {
+ auto result = size_t{0};
+ CheckError(cuMemGetAddressRange(nullptr, &result, *buffer_));
+ return result;
+ }
+
+ // Accessors to the private data-members
+ CUdeviceptr operator()() const { return *buffer_; }
+ CUdeviceptr& operator()() { return *buffer_; }
+private:
+ std::shared_ptr<CUdeviceptr> buffer_;
+ const BufferAccess access_;
+};
+
+// =================================================================================================
+
+// C++11 version of 'CUfunction'
+class Kernel {
+public:
+
+ // Constructor based on the regular CUDA data-type: memory management is handled elsewhere
+ explicit Kernel(const CUfunction kernel):
+ name_("unknown"),
+ kernel_(kernel) {
+ }
+
+ // Regular constructor with memory management
+ explicit Kernel(const Program &program, const std::string &name): name_(name) {
+ CheckError(cuModuleGetFunction(&kernel_, program.GetModule(), name.c_str()));
+ }
+
+ // Sets a kernel argument at the indicated position. This stores both the value of the argument
+ // (as raw bytes) and the index indicating where this value can be found.
+ template <typename T>
+ void SetArgument(const size_t index, const T &value) {
+ if (index >= arguments_indices_.size()) { arguments_indices_.resize(index+1); }
+ arguments_indices_[index] = arguments_data_.size();
+ for (auto j=size_t(0); j<sizeof(T); ++j) {
+ arguments_data_.push_back(reinterpret_cast<const char*>(&value)[j]);
+ }
+ }
+ template <typename T>
+ void SetArgument(const size_t index, Buffer<T> &value) {
+ SetArgument(index, value());
+ }
+
+ // Sets all arguments in one go using parameter packs. Note that this resets all previously set
+ // arguments using 'SetArgument' or 'SetArguments'.
+ template <typename... Args>
+ void SetArguments(Args&... args) {
+ arguments_indices_.clear();
+ arguments_data_.clear();
+ SetArgumentsRecursive(0, args...);
+ }
+
+  // Retrieves the amount of local memory used per work-group for this kernel. Note that this is
+  // the shared memory in CUDA terminology.
+ unsigned long LocalMemUsage(const Device &) const {
+ auto result = 0;
+ CheckError(cuFuncGetAttribute(&result, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel_));
+ return static_cast<unsigned long>(result);
+ }
+
+ // Retrieves the name of the kernel
+ std::string GetFunctionName() const {
+ return name_;
+ }
+
+ // Launches a kernel onto the specified queue
+ void Launch(const Queue &queue, const std::vector<size_t> &global,
+ const std::vector<size_t> &local, EventPointer event) {
+ // TODO: Currently this CUDA launch is always synchronous due to a cuStreamSynchronize call
+ if (local.size() == 0) {
+ throw LogicError("Kernel: launching with a default workgroup size is not implemented for the CUDA back-end");
+ }
+
+ // Creates the grid (number of threadblocks) and sets the block sizes (threads per block)
+ auto grid = std::vector<size_t>{1, 1, 1};
+ auto block = std::vector<size_t>{1, 1, 1};
+ if (global.size() != local.size()) { throw LogicError("invalid thread/workgroup dimensions"); }
+ for (auto i=size_t{0}; i<local.size(); ++i) { grid[i] = global[i]/local[i]; }
+ for (auto i=size_t{0}; i<local.size(); ++i) { block[i] = local[i]; }
+
+ // Creates the array of pointers from the arrays of indices & data
+ std::vector<void*> pointers;
+ for (auto &index: arguments_indices_) {
+ pointers.push_back(&arguments_data_[index]);
+ }
+
+ // Launches the kernel, its execution time is recorded by events
+ if (event) { CheckError(cuEventRecord(event->start(), queue())); }
+ CheckError(cuLaunchKernel(kernel_, grid[0], grid[1], grid[2], block[0], block[1], block[2],
+ 0, queue(), pointers.data(), nullptr));
+ cuStreamSynchronize(queue());
+ if (event) { CheckError(cuEventRecord(event->end(), queue())); }
+ }
+
+ // As above, but with an event waiting list
+ void Launch(const Queue &queue, const std::vector<size_t> &global,
+ const std::vector<size_t> &local, EventPointer event,
+ const std::vector<Event>& waitForEvents) {
+ for (auto &waitEvent : waitForEvents) {
+ waitEvent.WaitForCompletion(); // note: doesn't do anything, every kernel call is synchronous
+ }
+ return Launch(queue, global, local, event);
+ }
+
+ // Accessors to the private data-members
+ const CUfunction& operator()() const { return kernel_; }
+ CUfunction operator()() { return kernel_; }
+private:
+ const std::string name_;
+ CUfunction kernel_;
+ std::vector<size_t> arguments_indices_; // Indices of the arguments
+ std::vector<char> arguments_data_; // The arguments data as raw bytes
+
+ // Internal implementation for the recursive SetArguments function.
+ template <typename T>
+ void SetArgumentsRecursive(const size_t index, T &first) {
+ SetArgument(index, first);
+ }
+ template <typename T, typename... Args>
+ void SetArgumentsRecursive(const size_t index, T &first, Args&... args) {
+ SetArgument(index, first);
+ SetArgumentsRecursive(index+1, args...);
+ }
+};
+
+// =================================================================================================
+} // namespace clblast
+
+// CLBLAST_CUPP11_H_
+#endif
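Taken together, the classes above mirror clpp11.hpp closely enough that host code stays back-end-agnostic. A minimal end-to-end sketch of the CUDA path using only the classes defined in this header (hypothetical kernel and sizes; NVRTC compiles the source at run time):

    auto platform = clblast::Platform(size_t{0});
    auto device = clblast::Device(platform, size_t{0});
    auto context = clblast::Context(device);
    auto queue = clblast::Queue(context, device);

    // A trivial NVRTC-compilable kernel; extern "C" avoids name mangling.
    const auto source = std::string{
        "extern \"C\" __global__ void scale(float* x, float alpha) {"
        "  x[blockIdx.x * blockDim.x + threadIdx.x] *= alpha;"
        "}"};
    auto program = clblast::Program(context, source);
    auto options = std::vector<std::string>{};
    program.Build(device, options);  // Build appends -arch=compute_XY itself

    auto host = std::vector<float>(256, 1.0f);
    auto buffer = clblast::Buffer<float>(context, host.size());
    buffer.Write(queue, host.size(), host);

    auto kernel = clblast::Kernel(program, "scale");
    auto alpha = 2.0f;
    kernel.SetArguments(buffer, alpha);  // stored as raw bytes + indices, as described above
    auto event = clblast::Event();
    kernel.Launch(queue, {256}, {64}, event.pointer());
    queue.Finish();
    buffer.Read(queue, host.size(), host);  // every element is now 2.0f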
diff --git a/src/cxpp11_common.hpp b/src/cxpp11_common.hpp
index 6ac008be..5097eac4 100644
--- a/src/cxpp11_common.hpp
+++ b/src/cxpp11_common.hpp
@@ -15,6 +15,7 @@
#ifndef CLBLAST_CXPP11_COMMON_H_
#define CLBLAST_CXPP11_COMMON_H_
+#include <cstring> // strchr
#include <string> // std::string
#include <stdexcept> // std::runtime_error
diff --git a/src/database/database.cpp b/src/database/database.cpp
index 64306c7b..836c8803 100644
--- a/src/database/database.cpp
+++ b/src/database/database.cpp
@@ -124,6 +124,15 @@ std::string Database::GetDefines() const {
return defines;
}
+// ... or just the values as a string
+std::string Database::GetValuesString() const {
+ std::string defines{};
+ for (auto &parameter: *parameters_) {
+ defines += "_"+ToString(parameter.second);
+ }
+ return defines;
+}
+
// Retrieves the names of all the parameters
std::vector<std::string> Database::GetParameterNames() const {
auto parameter_names = std::vector<std::string>();
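Since Parameters is now a sorted std::map (see the database_structure.hpp change below), the value string comes out in key-sorted order and is therefore deterministic across runs. A standalone sketch of the same loop, with std::to_string standing in for ToString:

    #include <map>
    #include <string>

    std::map<std::string, size_t> parameters = {
        {"COPY_DIMX", 32}, {"COPY_DIMY", 8}, {"COPY_VW", 4}};
    std::string values;
    for (const auto &parameter : parameters) {
      values += "_" + std::to_string(parameter.second);
    }
    // values == "_32_8_4": key-sorted, hence stable from run to run

Deterministic ordering matters if this string ever participates in a persistent key, e.g. for caching compiled programs.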
diff --git a/src/database/database.hpp b/src/database/database.hpp
index 4cb0bf6a..04e9f095 100644
--- a/src/database/database.hpp
+++ b/src/database/database.hpp
@@ -53,7 +53,8 @@ class Database {
// Obtain a list of OpenCL pre-processor defines based on the parameters
std::string GetDefines() const;
- // Retrieves the names of all the parameters
+ // Retrieves the values or names of all the parameters
+ std::string GetValuesString() const;
std::vector<std::string> GetParameterNames() const;
private:
diff --git a/src/database/database_structure.hpp b/src/database/database_structure.hpp
index 9001b385..176fc556 100644
--- a/src/database/database_structure.hpp
+++ b/src/database/database_structure.hpp
@@ -17,7 +17,7 @@
#include <string>
#include <array>
#include <vector>
-#include <unordered_map>
+#include <map>
namespace clblast {
// A special namespace to hold all the global constant variables (including the database entries)
@@ -29,8 +29,8 @@ namespace database {
using Name = std::array<char, 51>; // name as stored in database (50 chars + string terminator)
using Params = std::array<size_t, 14>; // parameters as stored in database
-// Type alias after extracting from the database (map for improved code readability)
-using Parameters = std::unordered_map<std::string, size_t>; // parameters after reading from DB
+// Type alias after extracting from the database (sorted map for improved code readability)
+using Parameters = std::map<std::string, size_t>; // parameters after reading from DB
// The OpenCL device types
const std::string kDeviceTypeCPU = "CPU";
diff --git a/src/database/kernels/copy/copy_32.hpp b/src/database/kernels/copy/copy_32.hpp
index 83704fb0..15ce681c 100644
--- a/src/database/kernels/copy/copy_32.hpp
+++ b/src/database/kernels/copy/copy_32.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry CopySingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 16, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 32, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 16, 8, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 8, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 16, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -104,7 +105,8 @@ const DatabaseEntry CopySingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 8, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 8, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 16, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 16, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -133,6 +135,7 @@ const DatabaseEntry CopySingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 16, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 8, 32, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 8, 8, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 32, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 8, 32, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
@@ -152,7 +155,7 @@ const DatabaseEntry CopySingle = {
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 32, 8, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 8, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/copy/copy_3232.hpp b/src/database/kernels/copy/copy_3232.hpp
index 7beb7bef..c10bfa06 100644
--- a/src/database/kernels/copy/copy_3232.hpp
+++ b/src/database/kernels/copy/copy_3232.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry CopyComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 16, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 16, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 16, 16, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 8, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -96,7 +97,8 @@ const DatabaseEntry CopyComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -122,11 +124,12 @@ const DatabaseEntry CopyComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 16, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 16, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 16, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 32, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -141,7 +144,7 @@ const DatabaseEntry CopyComplexSingle = {
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 16, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/copy/copy_64.hpp b/src/database/kernels/copy/copy_64.hpp
index 7b29448e..3b545a9c 100644
--- a/src/database/kernels/copy/copy_64.hpp
+++ b/src/database/kernels/copy/copy_64.hpp
@@ -60,13 +60,14 @@ const DatabaseEntry CopyDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 16, 32, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 16, 8, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 16, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 16, 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 16, 32, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 16, 16, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 32, 8, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 8, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 16, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -82,7 +83,8 @@ const DatabaseEntry CopyDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 8, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 8, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 8, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 32, 16, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -110,18 +112,19 @@ const DatabaseEntry CopyDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 8, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 8, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 8, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 32, 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 16, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/copy/copy_6464.hpp b/src/database/kernels/copy/copy_6464.hpp
index 4bd4fc83..290ad051 100644
--- a/src/database/kernels/copy/copy_6464.hpp
+++ b/src/database/kernels/copy/copy_6464.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry CopyComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 8, 8, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 8, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 8, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 16, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 32, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -82,7 +83,8 @@ const DatabaseEntry CopyComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 8, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 8, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -110,6 +112,7 @@ const DatabaseEntry CopyComplexDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 32, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 8, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 32, 32, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/pad/pad_32.hpp b/src/database/kernels/pad/pad_32.hpp
index a5cbf926..08ec4d1e 100644
--- a/src/database/kernels/pad/pad_32.hpp
+++ b/src/database/kernels/pad/pad_32.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry PadSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 32, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 16, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 8, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 16, 32, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -104,7 +105,8 @@ const DatabaseEntry PadSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 32, 8, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 32, 8, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 32, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 32, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -133,6 +135,7 @@ const DatabaseEntry PadSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 16, 32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 16, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 16, 32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/pad/pad_3232.hpp b/src/database/kernels/pad/pad_3232.hpp
index c67d6643..06823819 100644
--- a/src/database/kernels/pad/pad_3232.hpp
+++ b/src/database/kernels/pad/pad_3232.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry PadComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 8, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 32, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -104,7 +105,8 @@ const DatabaseEntry PadComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 32, 32, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 16, 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -132,11 +134,12 @@ const DatabaseEntry PadComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 32, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 32, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/pad/pad_64.hpp b/src/database/kernels/pad/pad_64.hpp
index 609998e9..18af357c 100644
--- a/src/database/kernels/pad/pad_64.hpp
+++ b/src/database/kernels/pad/pad_64.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry PadDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 8, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 16, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 32, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -82,7 +83,8 @@ const DatabaseEntry PadDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 16, 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 32, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -110,11 +112,12 @@ const DatabaseEntry PadDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 32, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 32, 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 32, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 8, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/pad/pad_6464.hpp b/src/database/kernels/pad/pad_6464.hpp
index e3f10d09..b553e41c 100644
--- a/src/database/kernels/pad/pad_6464.hpp
+++ b/src/database/kernels/pad/pad_6464.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry PadComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 16, 16, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 8, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 16, 32, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -82,6 +83,7 @@ const DatabaseEntry PadComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 8, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -110,8 +112,9 @@ const DatabaseEntry PadComplexDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 8, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 8, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 8, 16, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 16, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 16, 8, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/padtranspose/padtranspose_32.hpp b/src/database/kernels/padtranspose/padtranspose_32.hpp
index 3734a40e..4b87afb2 100644
--- a/src/database/kernels/padtranspose/padtranspose_32.hpp
+++ b/src/database/kernels/padtranspose/padtranspose_32.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry PadtransposeSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 0, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 0, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 0, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -104,6 +105,7 @@ const DatabaseEntry PadtransposeSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -132,11 +134,12 @@ const DatabaseEntry PadtransposeSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 0, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 1, 32, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/padtranspose/padtranspose_3232.hpp b/src/database/kernels/padtranspose/padtranspose_3232.hpp
index 3ddbd319..a810aae4 100644
--- a/src/database/kernels/padtranspose/padtranspose_3232.hpp
+++ b/src/database/kernels/padtranspose/padtranspose_3232.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry PadtransposeComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 1, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 0, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -104,6 +105,7 @@ const DatabaseEntry PadtransposeComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -132,6 +134,7 @@ const DatabaseEntry PadtransposeComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 0, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 0, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 1, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/padtranspose/padtranspose_64.hpp b/src/database/kernels/padtranspose/padtranspose_64.hpp
index f12324d0..84b21157 100644
--- a/src/database/kernels/padtranspose/padtranspose_64.hpp
+++ b/src/database/kernels/padtranspose/padtranspose_64.hpp
@@ -60,13 +60,14 @@ const DatabaseEntry PadtransposeDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 0, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 0, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 1, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 0, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 1, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 0, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -82,6 +83,7 @@ const DatabaseEntry PadtransposeDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -110,6 +112,7 @@ const DatabaseEntry PadtransposeDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 0, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 0, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 0, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/padtranspose/padtranspose_6464.hpp b/src/database/kernels/padtranspose/padtranspose_6464.hpp
index c2e6afb1..db25b63e 100644
--- a/src/database/kernels/padtranspose/padtranspose_6464.hpp
+++ b/src/database/kernels/padtranspose/padtranspose_6464.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry PadtransposeComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 0, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 0, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1, 8, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 1, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -82,6 +83,7 @@ const DatabaseEntry PadtransposeComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -110,8 +112,9 @@ const DatabaseEntry PadtransposeComplexDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 1, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 1, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 1, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 0, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/transpose/transpose_32.hpp b/src/database/kernels/transpose/transpose_32.hpp
index 2dab0d49..64aba156 100644
--- a/src/database/kernels/transpose/transpose_32.hpp
+++ b/src/database/kernels/transpose/transpose_32.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry TransposeSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 1, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 4, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 4, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 4, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -104,7 +105,8 @@ const DatabaseEntry TransposeSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 16, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 32, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -133,8 +135,9 @@ const DatabaseEntry TransposeSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 0, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 4, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 4, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 8, 0, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 8, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/transpose/transpose_3232.hpp b/src/database/kernels/transpose/transpose_3232.hpp
index f00489d0..a82af30d 100644
--- a/src/database/kernels/transpose/transpose_3232.hpp
+++ b/src/database/kernels/transpose/transpose_3232.hpp
@@ -68,13 +68,14 @@ const DatabaseEntry TransposeComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 1, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 8, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 4, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 4, 1, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 4, 1, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 16, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 8, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 4, 1, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 4, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -96,6 +97,7 @@ const DatabaseEntry TransposeComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 8, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 16, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -124,8 +126,9 @@ const DatabaseEntry TransposeComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 16, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 16, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 16, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 16, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 16, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/transpose/transpose_64.hpp b/src/database/kernels/transpose/transpose_64.hpp
index 885079dc..f8cf65fb 100644
--- a/src/database/kernels/transpose/transpose_64.hpp
+++ b/src/database/kernels/transpose/transpose_64.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry TransposeDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 1, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 0, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 4, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 4, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 4, 1, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -82,6 +83,7 @@ const DatabaseEntry TransposeDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 8, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 8, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 8, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -110,6 +112,7 @@ const DatabaseEntry TransposeDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 8, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 8, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 16, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 8, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/transpose/transpose_6464.hpp b/src/database/kernels/transpose/transpose_6464.hpp
index 0551f7ae..89eb95a7 100644
--- a/src/database/kernels/transpose/transpose_6464.hpp
+++ b/src/database/kernels/transpose/transpose_6464.hpp
@@ -60,13 +60,14 @@ const DatabaseEntry TransposeComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 4, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 4, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 4, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 4, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 16, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 4, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 4, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 4, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -74,7 +75,8 @@ const DatabaseEntry TransposeComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 8, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 8, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 16, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 16, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -102,8 +104,9 @@ const DatabaseEntry TransposeComplexDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 8, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 8, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 8, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 16, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/xaxpy/xaxpy_32.hpp b/src/database/kernels/xaxpy/xaxpy_32.hpp
index e6f0e040..cce43e24 100644
--- a/src/database/kernels/xaxpy/xaxpy_32.hpp
+++ b/src/database/kernels/xaxpy/xaxpy_32.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry XaxpySingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 8, 512, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 4, 2048, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1, 512, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 1, 128, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 4, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -104,7 +105,8 @@ const DatabaseEntry XaxpySingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 2, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 2, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 4, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 4, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 2, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -133,11 +135,12 @@ const DatabaseEntry XaxpySingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 4, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 4, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 512, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 4, 1024, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 4, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -152,7 +155,7 @@ const DatabaseEntry XaxpySingle = {
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 4, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/xaxpy/xaxpy_3232.hpp b/src/database/kernels/xaxpy/xaxpy_3232.hpp
index ab8348a7..9f6a9997 100644
--- a/src/database/kernels/xaxpy/xaxpy_3232.hpp
+++ b/src/database/kernels/xaxpy/xaxpy_3232.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry XaxpyComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 1024, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 4, 1024, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 4, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 4, 1024, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 1, 1024, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -104,7 +105,8 @@ const DatabaseEntry XaxpyComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 1, 512, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -132,6 +134,7 @@ const DatabaseEntry XaxpyComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 64, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 2, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 2, 512, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xaxpy/xaxpy_64.hpp b/src/database/kernels/xaxpy/xaxpy_64.hpp
index 11ce1e4b..9d03c055 100644
--- a/src/database/kernels/xaxpy/xaxpy_64.hpp
+++ b/src/database/kernels/xaxpy/xaxpy_64.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry XaxpyDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 1, 2048, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1, 1024, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 2, 1024, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 8, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -82,7 +83,8 @@ const DatabaseEntry XaxpyDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 2, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -110,6 +112,7 @@ const DatabaseEntry XaxpyDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 64, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 64, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 2, 512, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 2, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xaxpy/xaxpy_6464.hpp b/src/database/kernels/xaxpy/xaxpy_6464.hpp
index a089f3df..9b4dba24 100644
--- a/src/database/kernels/xaxpy/xaxpy_6464.hpp
+++ b/src/database/kernels/xaxpy/xaxpy_6464.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry XaxpyComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 1024, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 8, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 8, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 8, 512, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -82,6 +83,7 @@ const DatabaseEntry XaxpyComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 1024, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -110,8 +112,9 @@ const DatabaseEntry XaxpyComplexDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 64, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 512, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 1, 256, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 1, 256, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 1, 512, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/xdot/xdot_32.hpp b/src/database/kernels/xdot/xdot_32.hpp
index a1bf963b..08900039 100644
--- a/src/database/kernels/xdot/xdot_32.hpp
+++ b/src/database/kernels/xdot/xdot_32.hpp
@@ -56,6 +56,7 @@ const DatabaseEntry XdotSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 64, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 64, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -81,7 +82,8 @@ const DatabaseEntry XdotSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 512, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 512, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 512, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -108,6 +110,7 @@ const DatabaseEntry XdotSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 128, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 512, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 256, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xdot/xdot_3232.hpp b/src/database/kernels/xdot/xdot_3232.hpp
index c2a1d9c8..06bb8d6e 100644
--- a/src/database/kernels/xdot/xdot_3232.hpp
+++ b/src/database/kernels/xdot/xdot_3232.hpp
@@ -56,6 +56,7 @@ const DatabaseEntry XdotComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 64, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 256, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -81,6 +82,7 @@ const DatabaseEntry XdotComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 512, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 128, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 512, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -107,11 +109,12 @@ const DatabaseEntry XdotComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1024, 512, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 256, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 128, 512, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 512, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 512, 512, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/xdot/xdot_64.hpp b/src/database/kernels/xdot/xdot_64.hpp
index 93d92fb7..23b6a83d 100644
--- a/src/database/kernels/xdot/xdot_64.hpp
+++ b/src/database/kernels/xdot/xdot_64.hpp
@@ -48,6 +48,7 @@ const DatabaseEntry XdotDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 64, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 512, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 64, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 256, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -60,7 +61,8 @@ const DatabaseEntry XdotDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 512, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 512, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 256, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 512, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -86,8 +88,9 @@ const DatabaseEntry XdotDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 128, 512, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 256, 512, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 128, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -97,7 +100,7 @@ const DatabaseEntry XdotDouble = {
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 256, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/xdot/xdot_6464.hpp b/src/database/kernels/xdot/xdot_6464.hpp
index a56df4d4..4fcf9026 100644
--- a/src/database/kernels/xdot/xdot_6464.hpp
+++ b/src/database/kernels/xdot/xdot_6464.hpp
@@ -48,11 +48,12 @@ const DatabaseEntry XdotComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 1024, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -60,6 +61,7 @@ const DatabaseEntry XdotComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 512, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 64, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 512, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -86,11 +88,12 @@ const DatabaseEntry XdotComplexDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 128, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/xgemm/xgemm_32.hpp b/src/database/kernels/xgemm/xgemm_32.hpp
index a7fb7de9..a4221046 100644
--- a/src/database/kernels/xgemm/xgemm_32.hpp
+++ b/src/database/kernels/xgemm/xgemm_32.hpp
@@ -68,13 +68,14 @@ const DatabaseEntry XgemmSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 16, 2, 8, 8, 128, 16, 8, 128, 0, 1, 1, 1, 1, 8 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 2, 32, 16, 64, 32, 8, 64, 0, 1, 1, 0, 1, 1 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 8, 32, 32, 64, 32, 16, 64, 1, 1, 1, 0, 2, 2 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 2, 16, 8, 128, 16, 8, 64, 0, 0, 1, 0, 1, 2 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 2, 32, 8, 128, 8, 8, 128, 1, 1, 1, 1, 2, 8 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 16, 2, 8, 8, 128, 8, 8, 128, 1, 1, 1, 0, 1, 8 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 32, 8, 16, 16, 64, 32, 32, 64, 0, 1, 1, 0, 1, 2 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 32, 2, 16, 32, 32, 8, 8, 64, 0, 1, 0, 0, 1, 8 } },
- { kDeviceNameDefault , Params{ 32, 2, 8, 8, 32, 8, 8, 64, 1, 1, 0, 0, 4, 4 } },
+ { kDeviceNameDefault , Params{ 32, 2, 8, 8, 32, 8, 8, 64, 0, 0, 0, 0, 4, 4 } },
} },
}
},
@@ -104,7 +105,8 @@ const DatabaseEntry XgemmSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 2, 16, 8, 64, 32, 16, 64, 1, 1, 1, 1, 2, 2 } },
- { kDeviceNameDefault , Params{ 16, 2, 16, 8, 64, 32, 16, 64, 1, 1, 1, 1, 2, 2 } },
+ { Name{"GeForce GTX 580 "}, Params{ 16, 2, 32, 8, 128, 16, 32, 64, 1, 1, 1, 0, 4, 2 } },
+ { kDeviceNameDefault , Params{ 32, 2, 8, 8, 32, 32, 32, 64, 0, 0, 0, 0, 1, 2 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 16, 2, 16, 8, 32, 8, 16, 64, 1, 1, 1, 1, 2, 4 } },
@@ -133,8 +135,9 @@ const DatabaseEntry XgemmSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 16, 2, 32, 16, 128, 32, 8, 128, 1, 1, 1, 0, 4, 1 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 2, 16, 8, 64, 8, 8, 64, 1, 1, 1, 1, 4, 8 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 16, 2, 32, 16, 64, 16, 8, 128, 1, 1, 0, 1, 2, 8 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 2, 16, 16, 64, 8, 8, 64, 1, 1, 0, 0, 4, 1 } },
- { kDeviceNameDefault , Params{ 32, 2, 16, 16, 64, 8, 8, 64, 1, 1, 0, 0, 4, 1 } },
+ { kDeviceNameDefault , Params{ 32, 2, 16, 16, 64, 8, 8, 64, 1, 1, 0, 0, 4, 4 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 32, 2, 16, 16, 64, 8, 8, 64, 1, 1, 0, 0, 4, 2 } },
diff --git a/src/database/kernels/xgemm/xgemm_3232.hpp b/src/database/kernels/xgemm/xgemm_3232.hpp
index 2f51c8cb..110a2f2e 100644
--- a/src/database/kernels/xgemm/xgemm_3232.hpp
+++ b/src/database/kernels/xgemm/xgemm_3232.hpp
@@ -68,13 +68,14 @@ const DatabaseEntry XgemmComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 16, 2, 32, 8, 128, 16, 16, 128, 1, 1, 0, 1, 1, 2 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 2, 32, 32, 32, 16, 16, 128, 1, 0, 0, 0, 1, 1 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 2, 32, 16, 32, 16, 16, 64, 0, 1, 1, 0, 1, 2 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 2, 16, 16, 64, 8, 16, 64, 0, 1, 0, 0, 4, 4 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 2, 8, 8, 128, 16, 32, 128, 0, 0, 0, 0, 1, 4 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 32, 2, 8, 8, 128, 32, 8, 128, 0, 0, 0, 0, 1, 4 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 32, 2, 8, 16, 16, 16, 16, 128, 0, 0, 1, 1, 1, 4 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 32, 2, 16, 16, 16, 8, 16, 128, 0, 1, 0, 0, 1, 8 } },
- { kDeviceNameDefault , Params{ 32, 2, 16, 16, 64, 8, 8, 32, 0, 0, 0, 0, 4, 2 } },
+ { kDeviceNameDefault , Params{ 32, 2, 8, 8, 32, 8, 8, 32, 0, 0, 0, 0, 4, 2 } },
} },
}
},
@@ -104,7 +105,8 @@ const DatabaseEntry XgemmComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 2, 16, 16, 32, 32, 16, 128, 0, 1, 1, 1, 2, 2 } },
- { kDeviceNameDefault , Params{ 16, 2, 16, 16, 32, 32, 16, 128, 0, 1, 1, 1, 2, 2 } },
+ { Name{"GeForce GTX 580 "}, Params{ 32, 2, 16, 8, 32, 32, 32, 128, 1, 0, 1, 0, 1, 1 } },
+ { kDeviceNameDefault , Params{ 16, 2, 16, 16, 32, 32, 16, 128, 0, 0, 1, 0, 1, 1 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 16, 8, 32, 32, 64, 32, 16, 128, 1, 0, 1, 0, 1, 4 } },
@@ -132,8 +134,9 @@ const DatabaseEntry XgemmComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 16, 2, 16, 16, 128, 16, 16, 64, 1, 1, 1, 1, 2, 4 } },
{ Name{"GeForce GTX 1080 "}, Params{ 16, 2, 32, 16, 64, 32, 8, 64, 1, 1, 0, 0, 1, 2 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 16, 2, 8, 16, 32, 16, 8, 64, 1, 1, 0, 0, 1, 1 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 2, 32, 32, 64, 8, 8, 32, 1, 1, 0, 0, 2, 4 } },
- { kDeviceNameDefault , Params{ 32, 2, 16, 16, 32, 16, 16, 64, 1, 1, 0, 0, 2, 4 } },
+ { kDeviceNameDefault , Params{ 32, 2, 8, 8, 16, 16, 16, 32, 1, 1, 0, 0, 2, 2 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 32, 2, 8, 8, 16, 32, 32, 64, 1, 1, 0, 0, 1, 1 } },
diff --git a/src/database/kernels/xgemm/xgemm_64.hpp b/src/database/kernels/xgemm/xgemm_64.hpp
index 4f75db55..b17aea7b 100644
--- a/src/database/kernels/xgemm/xgemm_64.hpp
+++ b/src/database/kernels/xgemm/xgemm_64.hpp
@@ -60,13 +60,14 @@ const DatabaseEntry XgemmDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 16, 2, 32, 8, 128, 16, 16, 128, 1, 1, 1, 1, 2, 8 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 2, 16, 8, 128, 16, 8, 128, 1, 0, 1, 1, 1, 8 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 2, 32, 16, 128, 16, 16, 64, 0, 1, 1, 0, 1, 2 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 2, 32, 16, 128, 16, 16, 128, 0, 0, 1, 0, 1, 2 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 2, 16, 8, 128, 8, 8, 64, 1, 0, 0, 1, 2, 8 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 32, 2, 16, 8, 128, 8, 8, 128, 1, 0, 0, 0, 2, 8 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 32, 2, 8, 16, 128, 16, 8, 128, 0, 0, 1, 1, 1, 8 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 32, 2, 8, 16, 64, 16, 8, 64, 0, 1, 1, 0, 1, 4 } },
- { kDeviceNameDefault , Params{ 32, 2, 16, 16, 64, 8, 8, 64, 1, 1, 0, 0, 1, 4 } },
+ { kDeviceNameDefault , Params{ 32, 2, 32, 32, 32, 16, 16, 64, 1, 1, 0, 0, 1, 4 } },
} },
}
},
@@ -82,7 +83,8 @@ const DatabaseEntry XgemmDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 2, 8, 16, 32, 32, 8, 64, 1, 1, 1, 0, 1, 2 } },
- { kDeviceNameDefault , Params{ 16, 2, 8, 16, 32, 32, 8, 64, 1, 1, 1, 0, 1, 2 } },
+ { Name{"GeForce GTX 580 "}, Params{ 32, 2, 32, 16, 64, 8, 8, 32, 0, 1, 1, 1, 1, 4 } },
+ { kDeviceNameDefault , Params{ 16, 2, 32, 16, 32, 32, 8, 32, 0, 1, 1, 0, 1, 2 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 16, 2, 8, 8, 16, 8, 8, 32, 1, 0, 0, 1, 2, 2 } },
@@ -110,18 +112,19 @@ const DatabaseEntry XgemmDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 16, 2, 8, 16, 32, 8, 8, 64, 0, 0, 1, 1, 2, 8 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 2, 16, 16, 32, 16, 16, 64, 0, 0, 0, 0, 2, 4 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 16, 2, 16, 16, 16, 16, 16, 64, 0, 0, 1, 0, 1, 4 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 2, 32, 32, 32, 16, 16, 32, 0, 0, 0, 0, 1, 2 } },
{ kDeviceNameDefault , Params{ 32, 2, 16, 16, 32, 16, 16, 64, 0, 0, 0, 0, 2, 4 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 32, 2, 16, 16, 32, 16, 16, 64, 0, 0, 0, 0, 2, 4 } },
+ { kDeviceNameDefault , Params{ 32, 2, 8, 8, 32, 8, 8, 32, 0, 0, 0, 0, 1, 1 } },
} },
}
},
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 32, 2, 8, 8, 32, 8, 8, 64, 0, 0, 0, 0, 4, 4 } },
+ { kDeviceNameDefault , Params{ 32, 2, 32, 32, 32, 8, 8, 32, 1, 1, 0, 0, 1, 4 } },
} },
}
},
diff --git a/src/database/kernels/xgemm/xgemm_6464.hpp b/src/database/kernels/xgemm/xgemm_6464.hpp
index 8932d953..6d28ab77 100644
--- a/src/database/kernels/xgemm/xgemm_6464.hpp
+++ b/src/database/kernels/xgemm/xgemm_6464.hpp
@@ -60,13 +60,14 @@ const DatabaseEntry XgemmComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 16, 2, 32, 8, 64, 16, 8, 128, 0, 1, 0, 1, 2, 1 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 2, 8, 8, 32, 16, 32, 128, 1, 0, 1, 0, 4, 1 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 32, 2, 16, 32, 128, 16, 16, 64, 0, 1, 0, 0, 2, 4 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 2, 16, 32, 128, 16, 8, 32, 0, 1, 0, 0, 4, 1 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 32, 2, 8, 8, 128, 8, 16, 128, 0, 0, 0, 1, 1, 8 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 32, 2, 8, 8, 128, 32, 8, 128, 0, 0, 0, 0, 1, 4 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 32, 8, 8, 32, 32, 8, 8, 32, 0, 1, 0, 0, 1, 2 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 32, 2, 32, 8, 128, 16, 8, 128, 0, 0, 1, 1, 1, 4 } },
- { kDeviceNameDefault , Params{ 32, 2, 8, 8, 16, 8, 8, 32, 1, 1, 0, 0, 1, 2 } },
+ { kDeviceNameDefault , Params{ 32, 2, 16, 16, 32, 8, 8, 32, 1, 1, 0, 0, 2, 2 } },
} },
}
},
@@ -82,7 +83,8 @@ const DatabaseEntry XgemmComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 16, 2, 32, 32, 32, 32, 8, 32, 0, 0, 1, 0, 1, 1 } },
- { kDeviceNameDefault , Params{ 16, 2, 32, 32, 32, 32, 8, 32, 0, 0, 1, 0, 1, 1 } },
+ { Name{"GeForce GTX 580 "}, Params{ 32, 2, 32, 32, 32, 8, 8, 64, 0, 0, 0, 0, 1, 2 } },
+ { kDeviceNameDefault , Params{ 16, 2, 32, 32, 32, 32, 8, 32, 0, 0, 0, 0, 1, 1 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 32, 8, 16, 16, 16, 8, 16, 64, 1, 0, 1, 1, 1, 1 } },
@@ -109,18 +111,19 @@ const DatabaseEntry XgemmComplexDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 32, 8, 32, 16, 32, 8, 8, 32, 0, 0, 0, 1, 1, 4 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 2, 16, 16, 16, 8, 8, 16, 0, 0, 0, 0, 1, 2 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 32, 2, 16, 16, 16, 16, 16, 16, 0, 0, 0, 0, 1, 1 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 2, 16, 16, 16, 16, 16, 16, 0, 0, 0, 0, 1, 1 } },
{ kDeviceNameDefault , Params{ 32, 2, 32, 32, 32, 32, 32, 64, 0, 0, 0, 0, 1, 2 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 32, 2, 16, 16, 32, 16, 16, 32, 0, 0, 0, 0, 1, 1 } },
+ { kDeviceNameDefault , Params{ 32, 2, 16, 16, 16, 16, 16, 32, 0, 0, 0, 0, 1, 1 } },
} },
}
},
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 32, 2, 32, 32, 32, 8, 8, 32, 1, 1, 0, 0, 1, 1 } },
+ { kDeviceNameDefault , Params{ 32, 2, 16, 16, 32, 16, 16, 64, 0, 0, 0, 0, 2, 2 } },
} },
}
},
diff --git a/src/database/kernels/xgemm_direct/xgemm_direct_32.hpp b/src/database/kernels/xgemm_direct/xgemm_direct_32.hpp
index 04d92735..7458d0b6 100644
--- a/src/database/kernels/xgemm_direct/xgemm_direct_32.hpp
+++ b/src/database/kernels/xgemm_direct/xgemm_direct_32.hpp
@@ -44,10 +44,11 @@ const DatabaseEntry XgemmDirectSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 2, 8, 8, 8, 8, 0, 0, 1, 8, 64, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 16, 16, 16, 16, 0, 0, 1, 1, 64, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 16, 16, 8, 8, 8, 0, 0, 2, 4, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 2, 8, 8, 8, 8, 0, 0, 2, 2, 64, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 2, 8, 8, 16, 8, 0, 0, 4, 4, 64, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 4, 2, 32, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 4, 4, 32, 0, 0, 0, 0 } },
} },
}
},
@@ -62,6 +63,10 @@ const DatabaseEntry XgemmDirectSingle = {
},
{ // NVIDIA GPUs
kDeviceTypeGPU, "NVIDIA", {
+ { "SM2.0", {
+ { Name{"GeForce GTX 580 "}, Params{ 2, 16, 8, 32, 16, 1, 1, 1, 1, 32, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 16, 8, 32, 16, 1, 1, 1, 1, 32, 0, 0, 0, 0 } },
+ } },
{ "SM3.0", {
{ Name{"GeForce GT 650M "}, Params{ 16, 16, 16, 8, 16, 1, 0, 2, 2, 32, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 16, 16, 16, 8, 16, 1, 0, 2, 2, 32, 0, 0, 0, 0 } },
@@ -76,8 +81,9 @@ const DatabaseEntry XgemmDirectSingle = {
} },
{ "SM6.1", {
{ Name{"GeForce GTX 1080 "}, Params{ 16, 16, 8, 16, 8, 1, 1, 1, 1, 32, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 16, 8, 8, 16, 16, 1, 1, 1, 1, 32, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 32, 8, 8, 16, 1, 1, 1, 1, 32, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 4, 2, 32, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 16, 16, 8, 8, 1, 1, 1, 1, 32, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 2, 8, 8, 16, 16, 1, 1, 4, 2, 32, 0, 0, 0, 0 } },
@@ -95,7 +101,7 @@ const DatabaseEntry XgemmDirectSingle = {
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 4, 2, 32, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/xgemm_direct/xgemm_direct_3232.hpp b/src/database/kernels/xgemm_direct/xgemm_direct_3232.hpp
index 9a7f737f..4242743a 100644
--- a/src/database/kernels/xgemm_direct/xgemm_direct_3232.hpp
+++ b/src/database/kernels/xgemm_direct/xgemm_direct_3232.hpp
@@ -40,6 +40,7 @@ const DatabaseEntry XgemmDirectComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 2, 8, 8, 8, 8, 0, 0, 4, 4, 32, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 2, 16, 8, 16, 8, 0, 0, 2, 1, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 2, 16, 16, 8, 8, 1, 1, 1, 4, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 2, 8, 8, 16, 8, 1, 1, 2, 1, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 2, 8, 8, 8, 8, 1, 1, 1, 1, 8, 0, 0, 0, 0 } },
@@ -58,6 +59,10 @@ const DatabaseEntry XgemmDirectComplexSingle = {
},
{ // NVIDIA GPUs
kDeviceTypeGPU, "NVIDIA", {
+ { "SM2.0", {
+ { Name{"GeForce GTX 580 "}, Params{ 2, 16, 8, 16, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 16, 8, 16, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
+ } },
{ "SM3.5", {
{ Name{"GeForce GTX TITAN Black "}, Params{ 2, 8, 8, 16, 16, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 2, 8, 8, 16, 16, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
@@ -68,8 +73,9 @@ const DatabaseEntry XgemmDirectComplexSingle = {
} },
{ "SM6.1", {
{ Name{"GeForce GTX 1080 "}, Params{ 8, 8, 16, 16, 8, 1, 1, 2, 2, 32, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 2, 16, 8, 16, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 2, 16, 16, 8, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 2, 16, 16, 8, 8, 1, 1, 2, 4, 32, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/xgemm_direct/xgemm_direct_64.hpp b/src/database/kernels/xgemm_direct/xgemm_direct_64.hpp
index ff31fdb2..14d4ccae 100644
--- a/src/database/kernels/xgemm_direct/xgemm_direct_64.hpp
+++ b/src/database/kernels/xgemm_direct/xgemm_direct_64.hpp
@@ -36,15 +36,20 @@ const DatabaseEntry XgemmDirectDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 2, 8, 8, 8, 8, 1, 1, 4, 4, 32, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 2, 8, 8, 8, 8, 1, 1, 4, 4, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 8, 8, 8, 8, 8, 0, 0, 1, 4, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 2, 8, 8, 8, 8, 1, 1, 4, 4, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 8, 8, 8, 8, 8, 0, 1, 1, 1, 8, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 4, 2, 32, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 4, 4, 32, 0, 0, 0, 0 } },
} },
}
},
{ // NVIDIA GPUs
kDeviceTypeGPU, "NVIDIA", {
+ { "SM2.0", {
+ { Name{"GeForce GTX 580 "}, Params{ 8, 16, 16, 16, 8, 1, 0, 2, 2, 32, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 16, 16, 16, 8, 1, 0, 2, 2, 32, 0, 0, 0, 0 } },
+ } },
{ "SM3.5", {
{ Name{"GeForce GTX TITAN Black "}, Params{ 8, 16, 16, 16, 8, 1, 0, 1, 1, 16, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 8, 16, 16, 16, 8, 1, 0, 1, 1, 16, 0, 0, 0, 0 } },
@@ -55,8 +60,9 @@ const DatabaseEntry XgemmDirectDouble = {
} },
{ "SM6.1", {
{ Name{"GeForce GTX 1080 "}, Params{ 2, 16, 16, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 2, 16, 8, 8, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 2, 8, 8, 8, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/xgemm_direct/xgemm_direct_6464.hpp b/src/database/kernels/xgemm_direct/xgemm_direct_6464.hpp
index 3a5e6b96..ef6940ee 100644
--- a/src/database/kernels/xgemm_direct/xgemm_direct_6464.hpp
+++ b/src/database/kernels/xgemm_direct/xgemm_direct_6464.hpp
@@ -36,15 +36,20 @@ const DatabaseEntry XgemmDirectComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 2, 8, 8, 32, 8, 0, 0, 1, 1, 32, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 2, 16, 16, 8, 8, 0, 0, 1, 4, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 8, 16, 16, 8, 8, 0, 0, 2, 1, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 8, 16, 8, 8, 8, 0, 0, 2, 2, 32, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 2, 32, 8, 8, 8, 0, 0, 1, 4, 32, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 2, 2, 16, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
} },
}
},
{ // NVIDIA GPUs
kDeviceTypeGPU, "NVIDIA", {
+ { "SM2.0", {
+ { Name{"GeForce GTX 580 "}, Params{ 2, 8, 8, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
+ } },
{ "SM3.5", {
{ Name{"GeForce GTX TITAN Black "}, Params{ 2, 8, 8, 8, 8, 1, 1, 1, 1, 8, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 2, 8, 8, 8, 8, 1, 1, 1, 1, 8, 0, 0, 0, 0 } },
@@ -55,6 +60,7 @@ const DatabaseEntry XgemmDirectComplexDouble = {
} },
{ "SM6.1", {
{ Name{"GeForce GTX 1080 "}, Params{ 2, 16, 16, 8, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 2, 16, 16, 8, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 2, 16, 16, 8, 8, 1, 1, 1, 2, 16, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 2, 16, 16, 8, 8, 1, 1, 1, 1, 16, 0, 0, 0, 0 } },
} },
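
As a reading aid for the ten leading values in each xgemm_direct Params line: the database appears to store a kernel's tuning parameters in alphabetical order of their names, which for xgemm_direct would be KWID, MDIMAD, MDIMCD, NDIMBD, NDIMCD, PADA, PADB, VWMD, VWND and WGD, leaving the four trailing slots unused. Treat this mapping as informative rather than normative; the authoritative name list lives with the routine sources. Decoded under that assumption:

    #include <cstddef>

    // Assumed decoding of Params{ 2, 8, 8, 8, 8, 0, 0, 1, 8, 64, 0, 0, 0, 0 }:
    // KWID=2, MDIMAD=8, MDIMCD=8, NDIMBD=8, NDIMCD=8,
    // PADA=0, PADB=0, VWMD=1, VWND=8, WGD=64.
    struct XgemmDirectParams {
      std::size_t kwid, mdimad, mdimcd, ndimbd, ndimcd;
      std::size_t pada, padb, vwmd, vwnd, wgd;
    };
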
diff --git a/src/database/kernels/xgemv/xgemv_32.hpp b/src/database/kernels/xgemv/xgemv_32.hpp
index 94a66257..471273d2 100644
--- a/src/database/kernels/xgemv/xgemv_32.hpp
+++ b/src/database/kernels/xgemv/xgemv_32.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry XgemvSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 128, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 32, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -95,7 +96,8 @@ const DatabaseEntry XgemvSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -124,6 +126,7 @@ const DatabaseEntry XgemvSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xgemv/xgemv_3232.hpp b/src/database/kernels/xgemv/xgemv_3232.hpp
index cdd9643f..3b6bb1e8 100644
--- a/src/database/kernels/xgemv/xgemv_3232.hpp
+++ b/src/database/kernels/xgemv/xgemv_3232.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry XgemvComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 128, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -95,6 +96,7 @@ const DatabaseEntry XgemvComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -116,6 +118,7 @@ const DatabaseEntry XgemvComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xgemv/xgemv_64.hpp b/src/database/kernels/xgemv/xgemv_64.hpp
index 6828239e..3f27e5c8 100644
--- a/src/database/kernels/xgemv/xgemv_64.hpp
+++ b/src/database/kernels/xgemv/xgemv_64.hpp
@@ -52,6 +52,7 @@ const DatabaseEntry XgemvDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 128, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 64, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -73,6 +74,7 @@ const DatabaseEntry XgemvDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -101,6 +103,7 @@ const DatabaseEntry XgemvDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
@@ -112,7 +115,7 @@ const DatabaseEntry XgemvDouble = {
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/xgemv/xgemv_6464.hpp b/src/database/kernels/xgemv/xgemv_6464.hpp
index fbc4e7be..97a5f586 100644
--- a/src/database/kernels/xgemv/xgemv_6464.hpp
+++ b/src/database/kernels/xgemv/xgemv_6464.hpp
@@ -52,6 +52,7 @@ const DatabaseEntry XgemvComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 64, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 128, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -73,6 +74,7 @@ const DatabaseEntry XgemvComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -80,6 +82,10 @@ const DatabaseEntry XgemvComplexDouble = {
{ Name{"GeForce GTX 670 "}, Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
+ { "SM6.1", {
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
{ "default", {
{ kDeviceNameDefault , Params{ 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
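
The xgemv family needs far fewer knobs. Under the same assumed alphabetical ordering, plain xgemv uses two slots (WGS1, WPT1), while the _fast and _fast_rot variants further below use three (VWn, WGSn, WPTn); everything after that is zero padding. A sketch of the assumed mapping:

    #include <cstddef>

    // Assumed slot mapping for the xgemv family:
    //   xgemv:          { WGS1, WPT1, 0, ... }        e.g. { 128, 4, 0, ... }
    //   xgemv_fast:     { VW2, WGS2, WPT2, 0, ... }
    //   xgemv_fast_rot: { VW3, WGS3, WPT3, 0, ... }
    struct XgemvParams     { std::size_t wgs1, wpt1; };
    struct XgemvFastParams { std::size_t vw2, wgs2, wpt2; };
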
diff --git a/src/database/kernels/xgemv_fast/xgemv_fast_32.hpp b/src/database/kernels/xgemv_fast/xgemv_fast_32.hpp
index 93081aa9..018073fa 100644
--- a/src/database/kernels/xgemv_fast/xgemv_fast_32.hpp
+++ b/src/database/kernels/xgemv_fast/xgemv_fast_32.hpp
@@ -60,12 +60,13 @@ const DatabaseEntry XgemvFastSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 1, 32, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 4, 128, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 4, 32, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 1, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 2, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 4, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 4, 128, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 4, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -95,7 +96,8 @@ const DatabaseEntry XgemvFastSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 2, 256, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -124,6 +126,7 @@ const DatabaseEntry XgemvFastSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xgemv_fast/xgemv_fast_3232.hpp b/src/database/kernels/xgemv_fast/xgemv_fast_3232.hpp
index c207e25b..414f13f9 100644
--- a/src/database/kernels/xgemv_fast/xgemv_fast_3232.hpp
+++ b/src/database/kernels/xgemv_fast/xgemv_fast_3232.hpp
@@ -60,12 +60,13 @@ const DatabaseEntry XgemvFastComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 2, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 4, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 1, 128, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 2, 128, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 4, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 4, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 1, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 1, 64, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 4, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -95,6 +96,7 @@ const DatabaseEntry XgemvFastComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -105,6 +107,7 @@ const DatabaseEntry XgemvFastComplexSingle = {
} },
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
diff --git a/src/database/kernels/xgemv_fast/xgemv_fast_64.hpp b/src/database/kernels/xgemv_fast/xgemv_fast_64.hpp
index 0aabd703..72e2de2b 100644
--- a/src/database/kernels/xgemv_fast/xgemv_fast_64.hpp
+++ b/src/database/kernels/xgemv_fast/xgemv_fast_64.hpp
@@ -52,12 +52,13 @@ const DatabaseEntry XgemvFastDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 1, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 1, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 4, 128, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 1, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 1, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 1, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -73,6 +74,7 @@ const DatabaseEntry XgemvFastDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -101,6 +103,7 @@ const DatabaseEntry XgemvFastDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 1, 32, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 1, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 256, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xgemv_fast/xgemv_fast_6464.hpp b/src/database/kernels/xgemv_fast/xgemv_fast_6464.hpp
index 095e5b37..d4272e26 100644
--- a/src/database/kernels/xgemv_fast/xgemv_fast_6464.hpp
+++ b/src/database/kernels/xgemv_fast/xgemv_fast_6464.hpp
@@ -52,6 +52,7 @@ const DatabaseEntry XgemvFastComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 32, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 4, 32, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 2, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 1, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz "}, Params{ 4, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -73,6 +74,7 @@ const DatabaseEntry XgemvFastComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 1, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -80,6 +82,10 @@ const DatabaseEntry XgemvFastComplexDouble = {
{ Name{"GeForce GTX 670 "}, Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
+ { "SM6.1", {
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 1, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
{ "default", {
{ kDeviceNameDefault , Params{ 1, 64, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_32.hpp b/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_32.hpp
index faf4d5e0..690b0a3f 100644
--- a/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_32.hpp
+++ b/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_32.hpp
@@ -44,11 +44,12 @@ const DatabaseEntry XgemvFastRotSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 8, 128, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 4, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 8, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -66,6 +67,10 @@ const DatabaseEntry XgemvFastRotSingle = {
},
{ // NVIDIA GPUs
kDeviceTypeGPU, "NVIDIA", {
+ { "SM2.0", {
+ { Name{"GeForce GTX 580 "}, Params{ 8, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
{ "SM3.0", {
{ Name{"GeForce GT 650M "}, Params{ 8, 32, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 8, 32, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -81,6 +86,7 @@ const DatabaseEntry XgemvFastRotSingle = {
} },
{ "SM6.1", {
{ Name{"GeForce GTX 1080 "}, Params{ 8, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 8, 64, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 64, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 8, 64, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_3232.hpp b/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_3232.hpp
index d6d3a3cf..52a57fb3 100644
--- a/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_3232.hpp
+++ b/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_3232.hpp
@@ -44,11 +44,12 @@ const DatabaseEntry XgemvFastRotComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 8, 32, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 4, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 4, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 4, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 4, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -64,6 +65,21 @@ const DatabaseEntry XgemvFastRotComplexSingle = {
} },
}
},
+ { // NVIDIA GPUs
+ kDeviceTypeGPU, "NVIDIA", {
+ { "SM2.0", {
+ { Name{"GeForce GTX 580 "}, Params{ 1, 32, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 1, 32, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
+ { "SM6.1", {
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 4, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 4, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
+ { "default", {
+ { kDeviceNameDefault , Params{ 4, 32, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
+ }
+ },
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
diff --git a/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_64.hpp b/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_64.hpp
index fc4a3b71..6818dbbc 100644
--- a/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_64.hpp
+++ b/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_64.hpp
@@ -36,6 +36,7 @@ const DatabaseEntry XgemvFastRotDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 4, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 4, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -46,6 +47,10 @@ const DatabaseEntry XgemvFastRotDouble = {
},
{ // NVIDIA GPUs
kDeviceTypeGPU, "NVIDIA", {
+ { "SM2.0", {
+ { Name{"GeForce GTX 580 "}, Params{ 2, 32, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 32, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
{ "SM3.5", {
{ Name{"GeForce GTX TITAN "}, Params{ 1, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX TITAN Black "}, Params{ 1, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -57,6 +62,7 @@ const DatabaseEntry XgemvFastRotDouble = {
} },
{ "SM6.1", {
{ Name{"GeForce GTX 1080 "}, Params{ 8, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 8, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 8, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 8, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
diff --git a/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_6464.hpp b/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_6464.hpp
index 1c575e7a..510102d0 100644
--- a/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_6464.hpp
+++ b/src/database/kernels/xgemv_fast_rot/xgemv_fast_rot_6464.hpp
@@ -36,6 +36,7 @@ const DatabaseEntry XgemvFastRotComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 2, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 8, 16, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 4, 64, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 2, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 8, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -44,6 +45,21 @@ const DatabaseEntry XgemvFastRotComplexDouble = {
} },
}
},
+ { // NVIDIA GPUs
+ kDeviceTypeGPU, "NVIDIA", {
+ { "SM2.0", {
+ { Name{"GeForce GTX 580 "}, Params{ 2, 32, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 2, 32, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
+ { "SM6.1", {
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 8, 32, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 8, 32, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
+ { "default", {
+ { kDeviceNameDefault , Params{ 2, 32, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ } },
+ }
+ },
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
diff --git a/src/database/kernels/xger/xger_32.hpp b/src/database/kernels/xger/xger_32.hpp
index 8db90ba0..97f1dc81 100644
--- a/src/database/kernels/xger/xger_32.hpp
+++ b/src/database/kernels/xger/xger_32.hpp
@@ -68,6 +68,7 @@ const DatabaseEntry XgerSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 32, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 256, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 128, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 256, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 256, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -94,6 +95,7 @@ const DatabaseEntry XgerSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 256, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 128, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 256, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -116,11 +118,12 @@ const DatabaseEntry XgerSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 512, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 16, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 64, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 512, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 512, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 128, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 128, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/xger/xger_3232.hpp b/src/database/kernels/xger/xger_3232.hpp
index 2456e440..f214e889 100644
--- a/src/database/kernels/xger/xger_3232.hpp
+++ b/src/database/kernels/xger/xger_3232.hpp
@@ -68,12 +68,13 @@ const DatabaseEntry XgerComplexSingle = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 128, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 512, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 256, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 256, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 256, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 512, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 256, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 256, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 128, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -94,7 +95,8 @@ const DatabaseEntry XgerComplexSingle = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 128, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 128, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 32, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 16, 8, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
{ Name{"GRID K520 "}, Params{ 64, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -115,8 +117,9 @@ const DatabaseEntry XgerComplexSingle = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 16, 64, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 128, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 256, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
{ kDeviceNameDefault , Params{ 128, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
diff --git a/src/database/kernels/xger/xger_64.hpp b/src/database/kernels/xger/xger_64.hpp
index ba85ae2b..08bf96c9 100644
--- a/src/database/kernels/xger/xger_64.hpp
+++ b/src/database/kernels/xger/xger_64.hpp
@@ -60,6 +60,7 @@ const DatabaseEntry XgerDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 256, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 128, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 512, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 256, 1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 256, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
@@ -73,6 +74,7 @@ const DatabaseEntry XgerDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 32, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 32, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 32, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -94,18 +96,19 @@ const DatabaseEntry XgerDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 32, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 512, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 32, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 512, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 128, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 64, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
{ // Default
kDeviceTypeAll, "default", {
{ "default", {
- { kDeviceNameDefault , Params{ 128, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 256, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/database/kernels/xger/xger_6464.hpp b/src/database/kernels/xger/xger_6464.hpp
index 9e016d3d..d1202ce4 100644
--- a/src/database/kernels/xger/xger_6464.hpp
+++ b/src/database/kernels/xger/xger_6464.hpp
@@ -60,12 +60,13 @@ const DatabaseEntry XgerComplexDouble = {
kDeviceTypeCPU, "Intel", {
{ "default", {
{ Name{"Intel(R) Core(TM) i7-2670QM CPU @ 2.20GHz "}, Params{ 128, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"Intel(R) Core(TM) i5-4570 CPU @ 3.20GHz "}, Params{ 512, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz "}, Params{ 512, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz "}, Params{ 256, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz "}, Params{ 512, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz "}, Params{ 256, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"Intel(R) Core(TM) i7-6770HQ CPU @ 2.60GHz "}, Params{ 256, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 256, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 128, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
@@ -73,6 +74,7 @@ const DatabaseEntry XgerComplexDouble = {
kDeviceTypeGPU, "NVIDIA", {
{ "SM2.0", {
{ Name{"GeForce GTX 480 "}, Params{ 64, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 580 "}, Params{ 16, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ kDeviceNameDefault , Params{ 64, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "SM3.0", {
@@ -94,11 +96,12 @@ const DatabaseEntry XgerComplexDouble = {
{ "SM6.1", {
{ Name{"GeForce GTX 1070 "}, Params{ 8, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"GeForce GTX 1080 "}, Params{ 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { Name{"GeForce GTX 1080 Ti "}, Params{ 4, 32, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ Name{"TITAN X (Pascal) "}, Params{ 4, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
- { kDeviceNameDefault , Params{ 8, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 256, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
{ "default", {
- { kDeviceNameDefault , Params{ 16, 8, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
+ { kDeviceNameDefault , Params{ 32, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
} },
}
},
diff --git a/src/kernels/common.opencl b/src/kernels/common.opencl
index 9481881e..01c411bc 100644
--- a/src/kernels/common.opencl
+++ b/src/kernels/common.opencl
@@ -24,14 +24,16 @@ R"(
// =================================================================================================
-// Enable support for double-precision
-#if PRECISION == 16
- #pragma OPENCL EXTENSION cl_khr_fp16: enable
-#endif
+#ifndef CUDA
+  // Enable support for half-precision
+ #if PRECISION == 16
+ #pragma OPENCL EXTENSION cl_khr_fp16: enable
+ #endif
-// Enable support for double-precision
-#if PRECISION == 64 || PRECISION == 6464
- #pragma OPENCL EXTENSION cl_khr_fp64: enable
+ // Enable support for double-precision
+ #if PRECISION == 64 || PRECISION == 6464
+ #pragma OPENCL EXTENSION cl_khr_fp64: enable
+ #endif
#endif
// Half-precision
@@ -117,10 +119,15 @@ R"(
#define GetRealArg(x) x
#endif
+// Pointers to local memory objects (as a define, since CUDA does not use the __local qualifier)
+#ifndef LOCAL_PTR
+ #define LOCAL_PTR __local
+#endif
+
// =================================================================================================
// Don't use the non-IEEE754 compliant OpenCL built-in mad() instruction by default. For specific
-// devices, this is enabled (see src/routine.cc).
+// devices, this is enabled (see src/routine.cpp).
#ifndef USE_CL_MAD
#define USE_CL_MAD 0
#endif
@@ -254,18 +261,18 @@ R"(
// http://docs.nvidia.com/cuda/samples/6_Advanced/transpose/doc/MatrixTranspose.pdf
// More details: https://github.com/CNugteren/CLBlast/issues/53
#if USE_STAGGERED_INDICES == 1
- INLINE_FUNC size_t GetGroupIDFlat() {
+ INLINE_FUNC int GetGroupIDFlat() {
return get_group_id(0) + get_num_groups(0) * get_group_id(1);
}
- INLINE_FUNC size_t GetGroupID1() {
+ INLINE_FUNC int GetGroupID1() {
return (GetGroupIDFlat()) % get_num_groups(1);
}
- INLINE_FUNC size_t GetGroupID0() {
+ INLINE_FUNC int GetGroupID0() {
return ((GetGroupIDFlat() / get_num_groups(1)) + GetGroupID1()) % get_num_groups(0);
}
#else
- INLINE_FUNC size_t GetGroupID1() { return get_group_id(1); }
- INLINE_FUNC size_t GetGroupID0() { return get_group_id(0); }
+ INLINE_FUNC int GetGroupID1() { return get_group_id(1); }
+ INLINE_FUNC int GetGroupID0() { return get_group_id(0); }
#endif
// =================================================================================================
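
Two things are going on in the common.opencl changes. The new #ifndef CUDA guard and the LOCAL_PTR macro belong to the CUDA port visible elsewhere in this diff (cupp11.hpp, clblast_cuda.cpp), and the group-ID helpers switch from size_t to int, which matches integer indexing in the kernels. The staggered-indices arithmetic itself is unchanged: it remaps which work-group handles which tile so that concurrently running groups hit different memory partitions (the "partition camping" fix from the referenced NVIDIA transpose document). The same remapping, rendered stand-alone with get_group_id/get_num_groups replaced by plain parameters:

    // Staggered work-group remapping (same arithmetic as the kernel helpers):
    // groups along a diagonal of the (g0, g1) grid receive consecutive flat
    // IDs, spreading concurrent groups across memory partitions.
    int GetGroupIDFlat(int gid0, int gid1, int num0) {
      return gid0 + num0 * gid1;
    }
    int GetGroupID1(int gid0, int gid1, int num0, int num1) {
      return GetGroupIDFlat(gid0, gid1, num0) % num1;
    }
    int GetGroupID0(int gid0, int gid1, int num0, int num1) {
      return (GetGroupIDFlat(gid0, gid1, num0) / num1 +
              GetGroupID1(gid0, gid1, num0, num1)) % num0;
    }
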
diff --git a/src/kernels/level2/level2.opencl b/src/kernels/level2/level2.opencl
index 505231ca..ff46c2a5 100644
--- a/src/kernels/level2/level2.opencl
+++ b/src/kernels/level2/level2.opencl
@@ -34,7 +34,7 @@ R"(
// Returns an element from a vector
INLINE_FUNC real LoadVector(const int id, const int max,
- __global real* gm, const int offset, const int inc,
+ const __global real* gm, const int offset, const int inc,
const int do_conjugate) {
if (id < max) {
real result = gm[id*inc + offset];
diff --git a/src/kernels/level3/invert_diagonal_blocks.opencl b/src/kernels/level3/invert_diagonal_blocks.opencl
index 93241700..281fdcff 100644
--- a/src/kernels/level3/invert_diagonal_blocks.opencl
+++ b/src/kernels/level3/invert_diagonal_blocks.opencl
@@ -164,7 +164,7 @@ void InvertDiagonalBlock(int n, __global const real* restrict src, const int src
// =================================================================================================
// Triple matrix-multiplication kernel: C = A * B
-INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part, __local real* blm, int n,
+INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part, LOCAL_PTR real* blm, int n,
__global const real* agm, __global const real* bgm, __global real* cgm,
const int lda, const int ldb, const int ldc,
int current_size, int num_pages, const int block_size) {
@@ -250,7 +250,7 @@ INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part,
// =================================================================================================
// Triple matrix-multiplication kernel part 1: B12 = A12 * B22 (upper) or B21 = A21 * B11 (lower)
-INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, __local real* blm, int n,
+INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, LOCAL_PTR real* blm, int n,
__global const real* src, const int a_offset, const int lda,
__global real* dest, int current_size, int num_pages, const int block_size) {
@@ -286,7 +286,7 @@ INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, __local rea
}
// Triple matrix-multiplication kernel part 2: B12 = -B11 * B12 (upper) or B21 = -B22 * B21 (lower)
-INLINE_FUNC void TripleMatMulPart2(const int size, const bool upper, __local real* blm, const int n,
+INLINE_FUNC void TripleMatMulPart2(const int size, const bool upper, LOCAL_PTR real* blm, const int n,
__global real* dest, int current_size, int num_pages, const int block_size) {
// Emulates a 3D grid: NX * (NY * num_pages)
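
The LOCAL_PTR substitutions in this file (and in transpose_pad.opencl below) exist because these helpers take pointers to on-chip local memory as function arguments: OpenCL requires the __local address-space qualifier on such parameters, whereas CUDA passes __shared__ memory through unqualified pointers. With the default from common.opencl expanding LOCAL_PTR to __local, a hypothetical CUDA-side prelude (not necessarily the literal one in this commit) only needs:

    // Hypothetical CUDA prelude for compiling the shared kernel sources:
    #define CUDA       // compiles out the OpenCL extension pragmas
    #define LOCAL_PTR  // expands to nothing: CUDA shared-memory pointers
                       // carry no address-space qualifier in signatures
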
diff --git a/src/kernels/level3/transpose_fast.opencl b/src/kernels/level3/transpose_fast.opencl
index 70156d3a..37b25d99 100644
--- a/src/kernels/level3/transpose_fast.opencl
+++ b/src/kernels/level3/transpose_fast.opencl
@@ -84,39 +84,39 @@ void TransposeMatrixFast(const int ld,
#if TRA_WPT == 1
results[0] = v[0];
#elif TRA_WPT == 2
- results[0] = (realT) {v[0].x, v[1].x};
- results[1] = (realT) {v[0].y, v[1].y};
+ results[0].x = v[0].x; results[0].y = v[1].x;
+ results[1].x = v[0].y; results[1].y = v[1].y;
#elif TRA_WPT == 4
- results[0] = (realT) {v[0].x, v[1].x, v[2].x, v[3].x};
- results[1] = (realT) {v[0].y, v[1].y, v[2].y, v[3].y};
- results[2] = (realT) {v[0].z, v[1].z, v[2].z, v[3].z};
- results[3] = (realT) {v[0].w, v[1].w, v[2].w, v[3].w};
+ results[0].x = v[0].x; results[0].y = v[1].x; results[0].z = v[2].x; results[0].w = v[3].x;
+ results[1].x = v[0].y; results[1].y = v[1].y; results[1].z = v[2].y; results[1].w = v[3].y;
+ results[2].x = v[0].z; results[2].y = v[1].z; results[2].z = v[2].z; results[2].w = v[3].z;
+ results[3].x = v[0].w; results[3].y = v[1].w; results[3].z = v[2].w; results[3].w = v[3].w;
#elif TRA_WPT == 8
- results[0] = (realT) {v[0].s0, v[1].s0, v[2].s0, v[3].s0, v[4].s0, v[5].s0, v[6].s0, v[7].s0};
- results[1] = (realT) {v[0].s1, v[1].s1, v[2].s1, v[3].s1, v[4].s1, v[5].s1, v[6].s1, v[7].s1};
- results[2] = (realT) {v[0].s2, v[1].s2, v[2].s2, v[3].s2, v[4].s2, v[5].s2, v[6].s2, v[7].s2};
- results[3] = (realT) {v[0].s3, v[1].s3, v[2].s3, v[3].s3, v[4].s3, v[5].s3, v[6].s3, v[7].s3};
- results[4] = (realT) {v[0].s4, v[1].s4, v[2].s4, v[3].s4, v[4].s4, v[5].s4, v[6].s4, v[7].s4};
- results[5] = (realT) {v[0].s5, v[1].s5, v[2].s5, v[3].s5, v[4].s5, v[5].s5, v[6].s5, v[7].s5};
- results[6] = (realT) {v[0].s6, v[1].s6, v[2].s6, v[3].s6, v[4].s6, v[5].s6, v[6].s6, v[7].s6};
- results[7] = (realT) {v[0].s7, v[1].s7, v[2].s7, v[3].s7, v[4].s7, v[5].s7, v[6].s7, v[7].s7};
+ results[0].s0 = v[0].s0; results[0].s1 = v[1].s0; results[0].s2 = v[2].s0; results[0].s3 = v[3].s0; results[0].s4 = v[4].s0; results[0].s5 = v[5].s0; results[0].s6 = v[6].s0; results[0].s7 = v[7].s0;
+ results[1].s0 = v[0].s1; results[1].s1 = v[1].s1; results[1].s2 = v[2].s1; results[1].s3 = v[3].s1; results[1].s4 = v[4].s1; results[1].s5 = v[5].s1; results[1].s6 = v[6].s1; results[1].s7 = v[7].s1;
+ results[2].s0 = v[0].s2; results[2].s1 = v[1].s2; results[2].s2 = v[2].s2; results[2].s3 = v[3].s2; results[2].s4 = v[4].s2; results[2].s5 = v[5].s2; results[2].s6 = v[6].s2; results[2].s7 = v[7].s2;
+ results[3].s0 = v[0].s3; results[3].s1 = v[1].s3; results[3].s2 = v[2].s3; results[3].s3 = v[3].s3; results[3].s4 = v[4].s3; results[3].s5 = v[5].s3; results[3].s6 = v[6].s3; results[3].s7 = v[7].s3;
+ results[4].s0 = v[0].s4; results[4].s1 = v[1].s4; results[4].s2 = v[2].s4; results[4].s3 = v[3].s4; results[4].s4 = v[4].s4; results[4].s5 = v[5].s4; results[4].s6 = v[6].s4; results[4].s7 = v[7].s4;
+ results[5].s0 = v[0].s5; results[5].s1 = v[1].s5; results[5].s2 = v[2].s5; results[5].s3 = v[3].s5; results[5].s4 = v[4].s5; results[5].s5 = v[5].s5; results[5].s6 = v[6].s5; results[5].s7 = v[7].s5;
+ results[6].s0 = v[0].s6; results[6].s1 = v[1].s6; results[6].s2 = v[2].s6; results[6].s3 = v[3].s6; results[6].s4 = v[4].s6; results[6].s5 = v[5].s6; results[6].s6 = v[6].s6; results[6].s7 = v[7].s6;
+ results[7].s0 = v[0].s7; results[7].s1 = v[1].s7; results[7].s2 = v[2].s7; results[7].s3 = v[3].s7; results[7].s4 = v[4].s7; results[7].s5 = v[5].s7; results[7].s6 = v[6].s7; results[7].s7 = v[7].s7;
#elif TRA_WPT == 16
- results[ 0] = (realT) {v[0].s0, v[1].s0, v[2].s0, v[3].s0, v[4].s0, v[5].s0, v[6].s0, v[7].s0, v[8].s0, v[9].s0, v[10].s0, v[11].s0, v[12].s0, v[13].s0, v[14].s0, v[15].s0};
- results[ 1] = (realT) {v[0].s1, v[1].s1, v[2].s1, v[3].s1, v[4].s1, v[5].s1, v[6].s1, v[7].s1, v[8].s1, v[9].s1, v[10].s1, v[11].s1, v[12].s1, v[13].s1, v[14].s1, v[15].s1};
- results[ 2] = (realT) {v[0].s2, v[1].s2, v[2].s2, v[3].s2, v[4].s2, v[5].s2, v[6].s2, v[7].s2, v[8].s2, v[9].s2, v[10].s2, v[11].s2, v[12].s2, v[13].s2, v[14].s2, v[15].s2};
- results[ 3] = (realT) {v[0].s3, v[1].s3, v[2].s3, v[3].s3, v[4].s3, v[5].s3, v[6].s3, v[7].s3, v[8].s3, v[9].s3, v[10].s3, v[11].s3, v[12].s3, v[13].s3, v[14].s3, v[15].s3};
- results[ 4] = (realT) {v[0].s4, v[1].s4, v[2].s4, v[3].s4, v[4].s4, v[5].s4, v[6].s4, v[7].s4, v[8].s4, v[9].s4, v[10].s4, v[11].s4, v[12].s4, v[13].s4, v[14].s4, v[15].s4};
- results[ 5] = (realT) {v[0].s5, v[1].s5, v[2].s5, v[3].s5, v[4].s5, v[5].s5, v[6].s5, v[7].s5, v[8].s5, v[9].s5, v[10].s5, v[11].s5, v[12].s5, v[13].s5, v[14].s5, v[15].s5};
- results[ 6] = (realT) {v[0].s6, v[1].s6, v[2].s6, v[3].s6, v[4].s6, v[5].s6, v[6].s6, v[7].s6, v[8].s6, v[9].s6, v[10].s6, v[11].s6, v[12].s6, v[13].s6, v[14].s6, v[15].s6};
- results[ 7] = (realT) {v[0].s7, v[1].s7, v[2].s7, v[3].s7, v[4].s7, v[5].s7, v[6].s7, v[7].s7, v[8].s7, v[9].s7, v[10].s7, v[11].s7, v[12].s7, v[13].s7, v[14].s7, v[15].s7};
- results[ 8] = (realT) {v[0].s8, v[1].s8, v[2].s8, v[3].s8, v[4].s8, v[5].s8, v[6].s8, v[7].s8, v[8].s8, v[9].s8, v[10].s8, v[11].s8, v[12].s8, v[13].s8, v[14].s8, v[15].s8};
- results[ 9] = (realT) {v[0].s9, v[1].s9, v[2].s9, v[3].s9, v[4].s9, v[5].s9, v[6].s9, v[7].s9, v[8].s9, v[9].s9, v[10].s9, v[11].s9, v[12].s9, v[13].s9, v[14].s9, v[15].s9};
- results[10] = (realT) {v[0].sA, v[1].sA, v[2].sA, v[3].sA, v[4].sA, v[5].sA, v[6].sA, v[7].sA, v[8].sA, v[9].sA, v[10].sA, v[11].sA, v[12].sA, v[13].sA, v[14].sA, v[15].sA};
- results[11] = (realT) {v[0].sB, v[1].sB, v[2].sB, v[3].sB, v[4].sB, v[5].sB, v[6].sB, v[7].sB, v[8].sB, v[9].sB, v[10].sB, v[11].sB, v[12].sB, v[13].sB, v[14].sB, v[15].sB};
- results[12] = (realT) {v[0].sC, v[1].sC, v[2].sC, v[3].sC, v[4].sC, v[5].sC, v[6].sC, v[7].sC, v[8].sC, v[9].sC, v[10].sC, v[11].sC, v[12].sC, v[13].sC, v[14].sC, v[15].sC};
- results[13] = (realT) {v[0].sD, v[1].sD, v[2].sD, v[3].sD, v[4].sD, v[5].sD, v[6].sD, v[7].sD, v[8].sD, v[9].sD, v[10].sD, v[11].sD, v[12].sD, v[13].sD, v[14].sD, v[15].sD};
- results[14] = (realT) {v[0].sE, v[1].sE, v[2].sE, v[3].sE, v[4].sE, v[5].sE, v[6].sE, v[7].sE, v[8].sE, v[9].sE, v[10].sE, v[11].sE, v[12].sE, v[13].sE, v[14].sE, v[15].sE};
- results[15] = (realT) {v[0].sF, v[1].sF, v[2].sF, v[3].sF, v[4].sF, v[5].sF, v[6].sF, v[7].sF, v[8].sF, v[9].sF, v[10].sF, v[11].sF, v[12].sF, v[13].sF, v[14].sF, v[15].sF};
+ results[ 0].s0 = v[0].s0; results[ 0].s1 = v[1].s0; results[ 0].s2 = v[2].s0; results[ 0].s3 = v[3].s0; results[ 0].s4 = v[4].s0; results[ 0].s5 = v[5].s0; results[ 0].s6 = v[6].s0; results[ 0].s7 = v[7].s0; results[ 0].s8 = v[8].s0; results[ 0].s9 = v[9].s0; results[ 0].sA = v[10].s0; results[ 0].sB = v[11].s0; results[ 0].sC = v[12].s0; results[ 0].sD = v[13].s0; results[ 0].sE = v[14].s0; results[ 0].sF = v[15].s0;
+ results[ 1].s0 = v[0].s1; results[ 1].s1 = v[1].s1; results[ 1].s2 = v[2].s1; results[ 1].s3 = v[3].s1; results[ 1].s4 = v[4].s1; results[ 1].s5 = v[5].s1; results[ 1].s6 = v[6].s1; results[ 1].s7 = v[7].s1; results[ 1].s8 = v[8].s1; results[ 1].s9 = v[9].s1; results[ 1].sA = v[10].s1; results[ 1].sB = v[11].s1; results[ 1].sC = v[12].s1; results[ 1].sD = v[13].s1; results[ 1].sE = v[14].s1; results[ 1].sF = v[15].s1;
+ results[ 2].s0 = v[0].s2; results[ 2].s1 = v[1].s2; results[ 2].s2 = v[2].s2; results[ 2].s3 = v[3].s2; results[ 2].s4 = v[4].s2; results[ 2].s5 = v[5].s2; results[ 2].s6 = v[6].s2; results[ 2].s7 = v[7].s2; results[ 2].s8 = v[8].s2; results[ 2].s9 = v[9].s2; results[ 2].sA = v[10].s2; results[ 2].sB = v[11].s2; results[ 2].sC = v[12].s2; results[ 2].sD = v[13].s2; results[ 2].sE = v[14].s2; results[ 2].sF = v[15].s2;
+ results[ 3].s0 = v[0].s3; results[ 3].s1 = v[1].s3; results[ 3].s2 = v[2].s3; results[ 3].s3 = v[3].s3; results[ 3].s4 = v[4].s3; results[ 3].s5 = v[5].s3; results[ 3].s6 = v[6].s3; results[ 3].s7 = v[7].s3; results[ 3].s8 = v[8].s3; results[ 3].s9 = v[9].s3; results[ 3].sA = v[10].s3; results[ 3].sB = v[11].s3; results[ 3].sC = v[12].s3; results[ 3].sD = v[13].s3; results[ 3].sE = v[14].s3; results[ 3].sF = v[15].s3;
+ results[ 4].s0 = v[0].s4; results[ 4].s1 = v[1].s4; results[ 4].s2 = v[2].s4; results[ 4].s3 = v[3].s4; results[ 4].s4 = v[4].s4; results[ 4].s5 = v[5].s4; results[ 4].s6 = v[6].s4; results[ 4].s7 = v[7].s4; results[ 4].s8 = v[8].s4; results[ 4].s9 = v[9].s4; results[ 4].sA = v[10].s4; results[ 4].sB = v[11].s4; results[ 4].sC = v[12].s4; results[ 4].sD = v[13].s4; results[ 4].sE = v[14].s4; results[ 4].sF = v[15].s4;
+ results[ 5].s0 = v[0].s5; results[ 5].s1 = v[1].s5; results[ 5].s2 = v[2].s5; results[ 5].s3 = v[3].s5; results[ 5].s4 = v[4].s5; results[ 5].s5 = v[5].s5; results[ 5].s6 = v[6].s5; results[ 5].s7 = v[7].s5; results[ 5].s8 = v[8].s5; results[ 5].s9 = v[9].s5; results[ 5].sA = v[10].s5; results[ 5].sB = v[11].s5; results[ 5].sC = v[12].s5; results[ 5].sD = v[13].s5; results[ 5].sE = v[14].s5; results[ 5].sF = v[15].s5;
+ results[ 6].s0 = v[0].s6; results[ 6].s1 = v[1].s6; results[ 6].s2 = v[2].s6; results[ 6].s3 = v[3].s6; results[ 6].s4 = v[4].s6; results[ 6].s5 = v[5].s6; results[ 6].s6 = v[6].s6; results[ 6].s7 = v[7].s6; results[ 6].s8 = v[8].s6; results[ 6].s9 = v[9].s6; results[ 6].sA = v[10].s6; results[ 6].sB = v[11].s6; results[ 6].sC = v[12].s6; results[ 6].sD = v[13].s6; results[ 6].sE = v[14].s6; results[ 6].sF = v[15].s6;
+ results[ 7].s0 = v[0].s7; results[ 7].s1 = v[1].s7; results[ 7].s2 = v[2].s7; results[ 7].s3 = v[3].s7; results[ 7].s4 = v[4].s7; results[ 7].s5 = v[5].s7; results[ 7].s6 = v[6].s7; results[ 7].s7 = v[7].s7; results[ 7].s8 = v[8].s7; results[ 7].s9 = v[9].s7; results[ 7].sA = v[10].s7; results[ 7].sB = v[11].s7; results[ 7].sC = v[12].s7; results[ 7].sD = v[13].s7; results[ 7].sE = v[14].s7; results[ 7].sF = v[15].s7;
+ results[ 8].s0 = v[0].s8; results[ 8].s1 = v[1].s8; results[ 8].s2 = v[2].s8; results[ 8].s3 = v[3].s8; results[ 8].s4 = v[4].s8; results[ 8].s5 = v[5].s8; results[ 8].s6 = v[6].s8; results[ 8].s7 = v[7].s8; results[ 8].s8 = v[8].s8; results[ 8].s9 = v[9].s8; results[ 8].sA = v[10].s8; results[ 8].sB = v[11].s8; results[ 8].sC = v[12].s8; results[ 8].sD = v[13].s8; results[ 8].sE = v[14].s8; results[ 8].sF = v[15].s8;
+ results[ 9].s0 = v[0].s9; results[ 9].s1 = v[1].s9; results[ 9].s2 = v[2].s9; results[ 9].s3 = v[3].s9; results[ 9].s4 = v[4].s9; results[ 9].s5 = v[5].s9; results[ 9].s6 = v[6].s9; results[ 9].s7 = v[7].s9; results[ 9].s8 = v[8].s9; results[ 9].s9 = v[9].s9; results[ 9].sA = v[10].s9; results[ 9].sB = v[11].s9; results[ 9].sC = v[12].s9; results[ 9].sD = v[13].s9; results[ 9].sE = v[14].s9; results[ 9].sF = v[15].s9;
+ results[10].s0 = v[0].sA; results[10].s1 = v[1].sA; results[10].s2 = v[2].sA; results[10].s3 = v[3].sA; results[10].s4 = v[4].sA; results[10].s5 = v[5].sA; results[10].s6 = v[6].sA; results[10].s7 = v[7].sA; results[10].s8 = v[8].sA; results[10].s9 = v[9].sA; results[10].sA = v[10].sA; results[10].sB = v[11].sA; results[10].sC = v[12].sA; results[10].sD = v[13].sA; results[10].sE = v[14].sA; results[10].sF = v[15].sA;
+ results[11].s0 = v[0].sB; results[11].s1 = v[1].sB; results[11].s2 = v[2].sB; results[11].s3 = v[3].sB; results[11].s4 = v[4].sB; results[11].s5 = v[5].sB; results[11].s6 = v[6].sB; results[11].s7 = v[7].sB; results[11].s8 = v[8].sB; results[11].s9 = v[9].sB; results[11].sA = v[10].sB; results[11].sB = v[11].sB; results[11].sC = v[12].sB; results[11].sD = v[13].sB; results[11].sE = v[14].sB; results[11].sF = v[15].sB;
+ results[12].s0 = v[0].sC; results[12].s1 = v[1].sC; results[12].s2 = v[2].sC; results[12].s3 = v[3].sC; results[12].s4 = v[4].sC; results[12].s5 = v[5].sC; results[12].s6 = v[6].sC; results[12].s7 = v[7].sC; results[12].s8 = v[8].sC; results[12].s9 = v[9].sC; results[12].sA = v[10].sC; results[12].sB = v[11].sC; results[12].sC = v[12].sC; results[12].sD = v[13].sC; results[12].sE = v[14].sC; results[12].sF = v[15].sC;
+ results[13].s0 = v[0].sD; results[13].s1 = v[1].sD; results[13].s2 = v[2].sD; results[13].s3 = v[3].sD; results[13].s4 = v[4].sD; results[13].s5 = v[5].sD; results[13].s6 = v[6].sD; results[13].s7 = v[7].sD; results[13].s8 = v[8].sD; results[13].s9 = v[9].sD; results[13].sA = v[10].sD; results[13].sB = v[11].sD; results[13].sC = v[12].sD; results[13].sD = v[13].sD; results[13].sE = v[14].sD; results[13].sF = v[15].sD;
+ results[14].s0 = v[0].sE; results[14].s1 = v[1].sE; results[14].s2 = v[2].sE; results[14].s3 = v[3].sE; results[14].s4 = v[4].sE; results[14].s5 = v[5].sE; results[14].s6 = v[6].sE; results[14].s7 = v[7].sE; results[14].s8 = v[8].sE; results[14].s9 = v[9].sE; results[14].sA = v[10].sE; results[14].sB = v[11].sE; results[14].sC = v[12].sE; results[14].sD = v[13].sE; results[14].sE = v[14].sE; results[14].sF = v[15].sE;
+ results[15].s0 = v[0].sF; results[15].s1 = v[1].sF; results[15].s2 = v[2].sF; results[15].s3 = v[3].sF; results[15].s4 = v[4].sF; results[15].s5 = v[5].sF; results[15].s6 = v[6].sF; results[15].s7 = v[7].sF; results[15].s8 = v[8].sF; results[15].s9 = v[9].sF; results[15].sA = v[10].sF; results[15].sB = v[11].sF; results[15].sC = v[12].sF; results[15].sD = v[13].sF; results[15].sE = v[14].sF; results[15].sF = v[15].sF;
#endif
// Multiplies by alpha and then stores the results into the destination matrix
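
The rewrite above replaces OpenCL vector-literal construction, (realT) {a, b, ...}, with per-component assignments. The two forms are semantically identical, but curly-brace vector literals are OpenCL-only syntax; assigning components keeps the kernel source valid when compiled as CUDA, where float2/float4 components are ordinary struct members. A minimal stand-alone rendering of the TRA_WPT == 2 case:

    // Stand-in for realT at TRA_WPT == 2: transpose a 2x2 tile held in two
    // two-component vectors by assigning components, without vector literals.
    struct real2 { float x, y; };

    void TransposeWpt2(const real2 v[2], real2 results[2]) {
      results[0].x = v[0].x; results[0].y = v[1].x;
      results[1].x = v[0].y; results[1].y = v[1].y;
    }
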
diff --git a/src/kernels/level3/transpose_pad.opencl b/src/kernels/level3/transpose_pad.opencl
index 49c5b9a3..ba9a6a56 100644
--- a/src/kernels/level3/transpose_pad.opencl
+++ b/src/kernels/level3/transpose_pad.opencl
@@ -24,7 +24,7 @@ R"(
// Transposes a matrix from source to destination. The output is padded with zero values in case the
// destination matrix dimensions are larger than the transposed source matrix dimensions.
-INLINE_FUNC void _TransposePadMatrix(__local real* tile,
+INLINE_FUNC void _TransposePadMatrix(LOCAL_PTR real* tile,
const int src_one, const int src_two,
const int src_ld, const int src_offset,
__global const real* restrict src,
@@ -105,7 +105,7 @@ void TransposePadMatrix(const int src_one, const int src_two,
// Transposes a matrix, while considering possible padding in the source matrix. Data is read from a
// padded source matrix, but only the actual data is written back to the transposed destination
// matrix. This kernel optionally checks for upper/lower triangular matrices.
-INLINE_FUNC void _TransposeMatrix(__local real* tile,
+INLINE_FUNC void _TransposeMatrix(LOCAL_PTR real* tile,
const int src_one, const int src_two,
const int src_ld, const int src_offset,
__global const real* restrict src,
diff --git a/src/kernels/level3/xgemm_direct_batched.opencl b/src/kernels/level3/xgemm_direct_batched.opencl
index fa582cff..d946a056 100644
--- a/src/kernels/level3/xgemm_direct_batched.opencl
+++ b/src/kernels/level3/xgemm_direct_batched.opencl
@@ -19,8 +19,8 @@ R"(
// =================================================================================================
// Direct version of the batched GEMM kernel with [A, B] = [non-transposed, non-transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK,
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
@@ -40,8 +40,8 @@ __kernel void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int
}
// Direct version of the batched GEMM kernel with [A, B] = [non-transposed, transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK,
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
@@ -61,8 +61,8 @@ __kernel void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int
}
// Direct version of the batched GEMM kernel with [A, B] = [transposed, non-transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK,
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
@@ -82,8 +82,8 @@ __kernel void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int
}
// Direct version of the batched GEMM kernel with [A, B] = [transposed, transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK,
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
diff --git a/src/kernels/level3/xgemm_direct_part1.opencl b/src/kernels/level3/xgemm_direct_part1.opencl
index 8b650589..7d185224 100644
--- a/src/kernels/level3/xgemm_direct_part1.opencl
+++ b/src/kernels/level3/xgemm_direct_part1.opencl
@@ -184,7 +184,7 @@ INLINE_FUNC void GlobalToPrivateCheckedB(const __global real* restrict bgms, rea
// Caches on-chip local memory into per-thread private memory (registers). This function is specific
// for caching the A input matrix.
-INLINE_FUNC void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg,
+INLINE_FUNC void LocalToPrivateDirectA(LOCAL_PTR real* alm, real apm[MWID], const int kg,
const int a_transpose) {
#pragma unroll
for (int mi=0; mi<MWID; ++mi) {
@@ -195,7 +195,7 @@ INLINE_FUNC void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const
}
// Same as above, but now for the B input matrix
-INLINE_FUNC void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int kg,
+INLINE_FUNC void LocalToPrivateDirectB(LOCAL_PTR real* blm, real bpm[NWID], const int kg,
const int b_transpose) {
#pragma unroll
for (int ni=0; ni<NWID; ++ni) {
diff --git a/src/kernels/level3/xgemm_direct_part2.opencl b/src/kernels/level3/xgemm_direct_part2.opencl
index 1d9330fc..c3bf1b80 100644
--- a/src/kernels/level3/xgemm_direct_part2.opencl
+++ b/src/kernels/level3/xgemm_direct_part2.opencl
@@ -19,7 +19,7 @@ R"(
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix.
-INLINE_FUNC void GlobalToLocalDirectA(const __global realMD* restrict agm, __local real* alm,
+INLINE_FUNC void GlobalToLocalDirectA(const __global realMD* restrict agm, LOCAL_PTR real* alm,
const int a_ld, const int a_offset, const int kwg,
const int a_transpose, const int a_conjugate) {
#if MDIMCD == MDIMAD
@@ -90,7 +90,7 @@ INLINE_FUNC void GlobalToLocalDirectA(const __global realMD* restrict agm, __loc
}
// Same as above, but now for the B input matrix
-INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, __local real* blm,
+INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, LOCAL_PTR real* blm,
const int b_ld, const int b_offset, const int kwg,
const int b_transpose, const int b_conjugate) {
#if MDIMCD == NDIMBD
@@ -165,7 +165,7 @@ INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, __loc
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix. In contrast to the functions above, this function doesn't
// use the vector data-types.
-INLINE_FUNC void GlobalToLocalScalarA(const __global real* restrict agms, __local real* alm,
+INLINE_FUNC void GlobalToLocalScalarA(const __global real* restrict agms, LOCAL_PTR real* alm,
const int a_ld, const int a_offset, const int kwg,
const int a_transpose, const int a_conjugate) {
#if MDIMCD == MDIMAD
@@ -196,7 +196,7 @@ INLINE_FUNC void GlobalToLocalScalarA(const __global real* restrict agms, __loca
}
// Same as above, but now for the B input matrix
-INLINE_FUNC void GlobalToLocalScalarB(const __global real* restrict bgms, __local real* blm,
+INLINE_FUNC void GlobalToLocalScalarB(const __global real* restrict bgms, LOCAL_PTR real* blm,
const int b_ld, const int b_offset, const int kwg,
const int b_transpose, const int b_conjugate) {
#if MDIMCD == NDIMBD
@@ -231,7 +231,7 @@ INLINE_FUNC void GlobalToLocalScalarB(const __global real* restrict bgms, __loca
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix. In contrast to the functions above, this function performs bounds
// checks and doesn't use the vector data-types.
-INLINE_FUNC void GlobalToLocalCheckedA(const __global real* restrict agms, __local real* alm,
+INLINE_FUNC void GlobalToLocalCheckedA(const __global real* restrict agms, LOCAL_PTR real* alm,
const int a_ld, const int a_offset, const int kwg,
const int a_transpose, const int a_conjugate,
const int kSizeM, const int kSizeK) {
@@ -270,7 +270,7 @@ INLINE_FUNC void GlobalToLocalCheckedA(const __global real* restrict agms, __loc
}
// Same as above, but now for the B input matrix
-INLINE_FUNC void GlobalToLocalCheckedB(const __global real* restrict bgms, __local real* blm,
+INLINE_FUNC void GlobalToLocalCheckedB(const __global real* restrict bgms, LOCAL_PTR real* blm,
const int b_ld, const int b_offset, const int kwg,
const int b_transpose, const int b_conjugate,
const int kSizeN, const int kSizeK) {
diff --git a/src/kernels/level3/xgemm_direct_part3.opencl b/src/kernels/level3/xgemm_direct_part3.opencl
index b0beb614..5862dfa3 100644
--- a/src/kernels/level3/xgemm_direct_part3.opencl
+++ b/src/kernels/level3/xgemm_direct_part3.opencl
@@ -24,7 +24,7 @@ INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSize
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
__global real* cgm, const int c_offset, const int c_ld,
- __local real* alm, __local real* blm,
+ LOCAL_PTR real* alm, LOCAL_PTR real* blm,
const int a_transpose, const int b_transpose, const int c_transpose,
const int a_conjugate, const int b_conjugate) {
const real alpha = GetRealArg(arg_alpha);
@@ -147,8 +147,8 @@ INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSize
// =================================================================================================
// Direct version of the GEMM kernel with [A, B] = [non-transposed, non-transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK,
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_alpha, const real_arg arg_beta,
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
@@ -162,8 +162,8 @@ __kernel void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK
}
// Direct version of the GEMM kernel with [A, B] = [non-transposed, transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK,
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_alpha, const real_arg arg_beta,
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
@@ -177,8 +177,8 @@ __kernel void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK
}
// Direct version of the GEMM kernel with [A, B] = [transposed, non-transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK,
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_alpha, const real_arg arg_beta,
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
@@ -192,8 +192,8 @@ __kernel void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK
}
// Direct version of the GEMM kernel with [A, B] = [transposed, transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectTT(const int kSizeM, const int kSizeN, const int kSizeK,
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectTT(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_alpha, const real_arg arg_beta,
const __global realMD* restrict agm, const int a_offset, const int a_ld,
const __global realND* restrict bgm, const int b_offset, const int b_ld,
diff --git a/src/kernels/level3/xgemm_part1.opencl b/src/kernels/level3/xgemm_part1.opencl
index 07dafe13..172b3c6b 100644
--- a/src/kernels/level3/xgemm_part1.opencl
+++ b/src/kernels/level3/xgemm_part1.opencl
@@ -186,7 +186,7 @@ INLINE_FUNC void InitAccRegisters(realM cpm[NWI][MWI/VWM]) {
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix.
#if SA == 1
-INLINE_FUNC void GlobalToLocalA(const __global realM* restrict agm, __local realM* alm,
+INLINE_FUNC void GlobalToLocalA(const __global realM* restrict agm, LOCAL_PTR realM* alm,
const int kSizeM, const int tid, const int kwg) {
const int la0 = tid % MDIMA;
const int la1 = tid / MDIMA;
@@ -216,7 +216,7 @@ INLINE_FUNC void GlobalToLocalA(const __global realM* restrict agm, __local real
// Same as above, but now for the B input matrix
#if SB == 1
-INLINE_FUNC void GlobalToLocalB(const __global realN* restrict bgm, __local realN* blm,
+INLINE_FUNC void GlobalToLocalB(const __global realN* restrict bgm, LOCAL_PTR realN* blm,
const int kSizeN, const int tid, const int kwg) {
const int lb0 = tid % NDIMB;
const int lb1 = tid / NDIMB;
@@ -298,7 +298,7 @@ INLINE_FUNC void GlobalToPrivateB(const __global realN* restrict bgm, realN bpm[
// Caches on-chip local memory into per-thread private memory (registers). This function is specific
// for caching the A input matrix.
#if SA == 1
-INLINE_FUNC void LocalToPrivateA(__local realM* alm, realM apm[MWI/VWM], const int kg) {
+INLINE_FUNC void LocalToPrivateA(LOCAL_PTR realM* alm, realM apm[MWI/VWM], const int kg) {
#pragma unroll
for (int mi=0; mi<MWI/VWM; ++mi) {
#if STRM == 0
@@ -313,7 +313,7 @@ INLINE_FUNC void LocalToPrivateA(__local realM* alm, realM apm[MWI/VWM], const i
// Same as above, but now for the B input matrix
#if SB == 1
-INLINE_FUNC void LocalToPrivateB(__local realN* blm, realN bpm[NWI/VWN], const int kg) {
+INLINE_FUNC void LocalToPrivateB(LOCAL_PTR realN* blm, realN bpm[NWI/VWN], const int kg) {
#pragma unroll
for (int ni=0; ni<NWI/VWN; ++ni) {
#if STRN == 0
diff --git a/src/kernels/level3/xgemm_part3.opencl b/src/kernels/level3/xgemm_part3.opencl
index 3f0d590d..ce24907c 100644
--- a/src/kernels/level3/xgemm_part3.opencl
+++ b/src/kernels/level3/xgemm_part3.opencl
@@ -17,16 +17,16 @@ R"(
// =================================================================================================
-// Main body of the matrix-multiplication algorithm. It calls the (inlined) functions above.
+// Main body of the matrix-multiplication algorithm. It calls various (inlined) functions.
INLINE_FUNC void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK,
const __global realM* restrict agm, const __global realN* restrict bgm,
__global realM* cgm, realM cpm[NWI][MWI/VWM]
#if SA == 1 && SB == 1
- , __local realM* alm, __local realN* blm
+ , LOCAL_PTR realM* alm, LOCAL_PTR realN* blm
#elif SA == 1
- , __local realM* alm
+ , LOCAL_PTR realM* alm
#elif SB == 1
- , __local realN* blm
+ , LOCAL_PTR realN* blm
#endif
) {
@@ -192,10 +192,15 @@ void Xgemm(const int kSizeM, const int kSizeN, const int kSizeK,
const real_arg arg_beta,
const __global realM* restrict agm,
const __global realN* restrict bgm,
- __global realM* cgm) {
+ __global realM* cgm,
+ const int b_offset, const int c_offset) {
const real alpha = GetRealArg(arg_alpha);
const real beta = GetRealArg(arg_beta);
+  // Adds the offsets (used when a single temporary buffer serves A, B, and C)
+ bgm = &bgm[b_offset];
+ cgm = &cgm[c_offset];
+
// Allocates workgroup-private memory (local memory)
#if SA == 1
__local realM alm[KWG * MWG/VWM];
diff --git a/src/kernels/opencl_to_cuda.h b/src/kernels/opencl_to_cuda.h
new file mode 100644
index 00000000..5682a456
--- /dev/null
+++ b/src/kernels/opencl_to_cuda.h
@@ -0,0 +1,90 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file contains an (incomplete) header to interpret OpenCL kernels as CUDA kernels.
+//
+// =================================================================================================
+
+// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
+// literal). Comment-out this line for syntax-highlighting when developing.
+R"(
+// =================================================================================================
+
+// CLBlast specific additions
+#define CUDA 1
+#define LOCAL_PTR // pointers to local memory don't have to be annotated in CUDA
+
+// Replaces the OpenCL get_xxx_ID with CUDA equivalents
+__device__ int get_local_id(const int x) {
+ if (x == 0) { return threadIdx.x; }
+ if (x == 1) { return threadIdx.y; }
+ return threadIdx.z;
+}
+__device__ int get_group_id(const int x) {
+ if (x == 0) { return blockIdx.x; }
+ if (x == 1) { return blockIdx.y; }
+ return blockIdx.z;
+}
+__device__ int get_local_size(const int x) {
+ if (x == 0) { return blockDim.x; }
+ if (x == 1) { return blockDim.y; }
+ return blockDim.z;
+}
+__device__ int get_num_groups(const int x) {
+ if (x == 0) { return gridDim.x; }
+ if (x == 1) { return gridDim.y; }
+ return gridDim.z;
+}
+__device__ int get_global_size(const int x) {
+ if (x == 0) { return gridDim.x * blockDim.x; }
+ if (x == 1) { return gridDim.y * blockDim.y; }
+ return gridDim.z * blockDim.z;
+}
+__device__ int get_global_id(const int x) {
+ if (x == 0) { return blockIdx.x*blockDim.x + threadIdx.x; }
+ if (x == 1) { return blockIdx.y*blockDim.y + threadIdx.y; }
+ return blockIdx.z*blockDim.z + threadIdx.z;
+}
+
+// Adds the data-types which are not available natively under CUDA
+typedef struct { float s0; float s1; float s2; float s3;
+ float s4; float s5; float s6; float s7; } float8;
+typedef struct { float s0; float s1; float s2; float s3;
+ float s4; float s5; float s6; float s7;
+ float s8; float s9; float s10; float s11;
+ float s12; float s13; float s14; float s15; } float16;
+typedef struct { double s0; double s1; double s2; double s3;
+ double s4; double s5; double s6; double s7; } double8;
+typedef struct { double s0; double s1; double s2; double s3;
+ double s4; double s5; double s6; double s7;
+ double s8; double s9; double s10; double s11;
+ double s12; double s13; double s14; double s15; } double16;
+
+// Replaces the OpenCL keywords with their CUDA equivalents
+#define __kernel __placeholder__
+#define __global
+#define __placeholder__ extern "C" __global__
+#define __local __shared__
+#define restrict __restrict__
+#define __constant const
+#define inline __device__ // assumes all device functions are annotated with inline in OpenCL
+
+// Kernel attributes (currently not replaced)
+#define reqd_work_group_size(x, y, z)
+
+// Replaces OpenCL synchronisation with CUDA synchronisation
+#define barrier(x) __syncthreads()
+
+// =================================================================================================
+
+// End of the C++11 raw string literal
+)"
+
+// =================================================================================================
+
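Taken together, these defines explain the __kernel/__attribute__ reordering applied to the
kernels earlier in this diff: only with __kernel at the front does the substitution
(__kernel -> __placeholder__ -> extern "C" __global__) land at the start of the declaration, as
CUDA requires. A rough illustration of the pre-processed result (not verbatim compiler output):

    // OpenCL source, after the reordering:
    //   __kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
    //   void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK, ...)
    //
    // After opencl_to_cuda.h, roughly:
    //   extern "C" __global__ __attribute__(()) void XgemmDirectNN(...)
    //
    // reqd_work_group_size(x, y, z) expands to nothing, leaving an empty (harmless) attribute
    // list; __local data becomes __shared__, and barrier(...) becomes __syncthreads().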
diff --git a/src/routine.cpp b/src/routine.cpp
index c305feb8..0f9fe360 100644
--- a/src/routine.cpp
+++ b/src/routine.cpp
@@ -60,7 +60,6 @@ Routine::Routine(Queue &queue, EventPointer event, const std::string &name,
event_(event),
context_(queue_.GetContext()),
device_(queue_.GetDevice()),
- platform_(device_.Platform()),
db_(kernel_names) {
InitDatabase(userDatabase);
@@ -68,26 +67,35 @@ Routine::Routine(Queue &queue, EventPointer event, const std::string &name,
}
void Routine::InitDatabase(const std::vector<database::DatabaseEntry> &userDatabase) {
+ const auto platform_id = device_.PlatformID();
for (const auto &kernel_name : kernel_names_) {
// Queries the cache to see whether or not the kernel parameter database is already there
bool has_db;
- db_(kernel_name) = DatabaseCache::Instance().Get(DatabaseKeyRef{ platform_, device_(), precision_, kernel_name },
+ db_(kernel_name) = DatabaseCache::Instance().Get(DatabaseKeyRef{ platform_id, device_(), precision_, kernel_name },
&has_db);
if (has_db) { continue; }
// Builds the parameter database for this device and routine set and stores it in the cache
+ log_debug("Searching database for kernel '" + kernel_name + "'");
db_(kernel_name) = Database(device_, kernel_name, precision_, userDatabase);
- DatabaseCache::Instance().Store(DatabaseKey{ platform_, device_(), precision_, kernel_name },
+ DatabaseCache::Instance().Store(DatabaseKey{ platform_id, device_(), precision_, kernel_name },
Database{ db_(kernel_name) });
}
}
void Routine::InitProgram(std::initializer_list<const char *> source) {
+ // Determines the identifier for this particular routine call
+ auto routine_info = routine_name_;
+ for (const auto &kernel_name : kernel_names_) {
+ routine_info += "_" + kernel_name + db_(kernel_name).GetValuesString();
+ }
+ log_debug(routine_info);
+
// Queries the cache to see whether or not the program (context-specific) is already there
bool has_program;
- program_ = ProgramCache::Instance().Get(ProgramKeyRef{ context_(), device_(), precision_, routine_name_ },
+ program_ = ProgramCache::Instance().Get(ProgramKeyRef{ context_(), device_(), precision_, routine_info },
&has_program);
if (has_program) { return; }
@@ -102,12 +110,12 @@ void Routine::InitProgram(std::initializer_list<const char *> source) {
// is, a program is created and stored in the cache
const auto device_name = GetDeviceName(device_);
bool has_binary;
- auto binary = BinaryCache::Instance().Get(BinaryKeyRef{ precision_, routine_name_, device_name },
+ auto binary = BinaryCache::Instance().Get(BinaryKeyRef{ precision_, routine_info, device_name },
&has_binary);
if (has_binary) {
program_ = Program(device_, context_, binary);
program_.Build(device_, options);
- ProgramCache::Instance().Store(ProgramKey{ context_(), device_(), precision_, routine_name_ },
+ ProgramCache::Instance().Store(ProgramKey{ context_(), device_(), precision_, routine_info },
Program{ program_ });
return;
}
@@ -115,13 +123,13 @@ void Routine::InitProgram(std::initializer_list<const char *> source) {
// Otherwise, the kernel will be compiled and the program will be built. Both the binary and the
// program will be added to the cache.
- // Inspects whether or not cl_khr_fp64 is supported in case of double precision
+ // Inspects whether or not FP64 is supported in case of double precision
if ((precision_ == Precision::kDouble && !PrecisionSupported<double>(device_)) ||
(precision_ == Precision::kComplexDouble && !PrecisionSupported<double2>(device_))) {
throw RuntimeErrorCode(StatusCode::kNoDoublePrecision);
}
- // As above, but for cl_khr_fp16 (half precision)
+ // As above, but for FP16 (half precision)
if (precision_ == Precision::kHalf && !PrecisionSupported<half>(device_)) {
throw RuntimeErrorCode(StatusCode::kNoHalfPrecision);
}
@@ -159,6 +167,13 @@ void Routine::InitProgram(std::initializer_list<const char *> source) {
source_string += "#define GLOBAL_MEM_FENCE 1\n";
}
+ // Optionally adds a translation header from OpenCL kernels to CUDA kernels
+ #ifdef CUDA_API
+ source_string +=
+ #include "kernels/opencl_to_cuda.h"
+ ;
+ #endif
+
// Loads the common header (typedefs and defines and such)
source_string +=
#include "kernels/common.opencl"
@@ -180,8 +195,8 @@ void Routine::InitProgram(std::initializer_list<const char *> source) {
program_ = Program(context_, source_string);
try {
program_.Build(device_, options);
- } catch (const CLError &e) {
- if (e.status() == CL_BUILD_PROGRAM_FAILURE) {
+ } catch (const CLCudaAPIBuildError &e) {
+ if (program_.StatusIsCompilationWarningOrError(e.status())) {
fprintf(stdout, "OpenCL compiler error/warning: %s\n",
program_.GetBuildInfo(device_).c_str());
}
@@ -189,10 +204,10 @@ void Routine::InitProgram(std::initializer_list<const char *> source) {
}
// Store the compiled binary and program in the cache
- BinaryCache::Instance().Store(BinaryKey{ precision_, routine_name_, device_name },
+ BinaryCache::Instance().Store(BinaryKey{precision_, routine_info, device_name},
program_.GetIR());
- ProgramCache::Instance().Store(ProgramKey{ context_(), device_(), precision_, routine_name_ },
+ ProgramCache::Instance().Store(ProgramKey{context_(), device_(), precision_, routine_info},
Program{ program_ });
// Prints the elapsed compilation time in case of debugging in verbose mode
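With routine_info replacing routine_name_ in the program and binary cache keys, two devices that
share a context but resolve to different database entries no longer collide in the caches. A
minimal sketch of the identifier; GetValuesString() is assumed to serialise the tuned parameter
values, and its exact format is not shown in this diff:

    // e.g. routine_name_ == "Xgemm", kernel_names_ == {"Xgemm"}:
    auto routine_info = routine_name_;
    for (const auto &kernel_name : kernel_names_) {
      routine_info += "_" + kernel_name + db_(kernel_name).GetValuesString();
    }
    // -> something like "Xgemm_Xgemm<parameter values>", used for the ProgramCache,
    //    the BinaryCache, and the log_debug output above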
diff --git a/src/routine.hpp b/src/routine.hpp
index e77e35ad..a8f1cb6a 100644
--- a/src/routine.hpp
+++ b/src/routine.hpp
@@ -75,7 +75,6 @@ class Routine {
EventPointer event_;
const Context context_;
const Device device_;
- const cl_platform_id platform_;
// Compiled program (either retrieved from cache or compiled in slow path)
Program program_;
diff --git a/src/routines/common.hpp b/src/routines/common.hpp
index 84ccd9d2..bf3b1762 100644
--- a/src/routines/common.hpp
+++ b/src/routines/common.hpp
@@ -19,8 +19,7 @@
#include <string>
#include <vector>
-#include "clpp11.hpp"
-#include "clblast.h"
+#include "utilities/utilities.hpp"
#include "database/database.hpp"
namespace clblast {
diff --git a/src/routines/level2/xtrsv.cpp b/src/routines/level2/xtrsv.cpp
index d5d009ff..36c33a76 100644
--- a/src/routines/level2/xtrsv.cpp
+++ b/src/routines/level2/xtrsv.cpp
@@ -131,10 +131,13 @@ void Xtrsv<T>::DoTrsv(const Layout layout, const Triangle triangle,
if (i > 0) {
const auto gemv_m = (a_transpose == Transpose::kNo) ? block_size : i;
const auto gemv_n = (a_transpose == Transpose::kNo) ? i : block_size;
- DoGemv(layout, a_transpose, gemv_m, gemv_n, ConstantOne<T>(),
- a_buffer, a_offset + extra_offset_a, a_ld,
- x_buffer, x_offset + extra_offset_x, x_inc, ConstantOne<T>(),
- x_buffer, x_offset + extra_offset_b, x_inc );
+ auto gemv_event = Event();
+ auto gemv = Xgemv<T>(queue_, gemv_event.pointer());
+ gemv.DoGemv(layout, a_transpose, gemv_m, gemv_n, ConstantOne<T>(),
+ a_buffer, a_offset + extra_offset_a, a_ld,
+ x_buffer, x_offset + extra_offset_x, x_inc, ConstantOne<T>(),
+ x_buffer, x_offset + extra_offset_b, x_inc);
+ gemv_event.WaitForCompletion();
}
// Runs the triangular substitution for the block size
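The GEMV step is now issued through a dedicated Xgemv routine with its own event, and the host
blocks on that event before the next blocked step. The recurring pattern in this diff, as a
sketch (presumably needed because the previous direct DoGemv call gave no completion guarantee
before the dependent substitution ran):

    auto gemv_event = Event();                            // one event per nested routine call
    auto gemv = Xgemv<T>(queue_, gemv_event.pointer());   // routine bound to that event
    gemv.DoGemv(/* arguments as in the hunk above */);
    gemv_event.WaitForCompletion();                       // serialise before dependent work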
diff --git a/src/routines/level3/xgemm.cpp b/src/routines/level3/xgemm.cpp
index 3909c308..a0063ee2 100644
--- a/src/routines/level3/xgemm.cpp
+++ b/src/routines/level3/xgemm.cpp
@@ -161,10 +161,24 @@ void Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k,
auto c_no_temp = c_one == c_one_i && c_two == c_two_i && c_ld == c_one && c_offset == 0 &&
c_do_transpose == false;
- // Creates the temporary matrices
- const auto a_temp = (a_no_temp) ? a_buffer : Buffer<T>(context_, a_one_i*a_two_i);
- const auto b_temp = (b_no_temp) ? b_buffer : Buffer<T>(context_, b_one_i*b_two_i);
- const auto c_temp = (c_no_temp) ? c_buffer : Buffer<T>(context_, c_one_i*c_two_i);
+ // Computes the sizes and offsets for (optional) temporary buffers for the 3 matrices
+ auto temp_size = size_t{0};
+ auto b_temp_offset = size_t{0};
+ auto c_temp_offset = size_t{0};
+ if (!a_no_temp) { temp_size += a_one_i*a_two_i; }
+ if (!b_no_temp) { b_temp_offset = temp_size; temp_size += b_one_i*b_two_i; }
+ if (!c_no_temp) { c_temp_offset = temp_size; temp_size += c_one_i*c_two_i; }
+ if (!IsMultiple(b_temp_offset, db_["VWN"])) { throw BLASError(StatusCode::kUnexpectedError); }
+ if (!IsMultiple(c_temp_offset, db_["VWM"])) { throw BLASError(StatusCode::kUnexpectedError); }
+
+  // Creates the buffer for the (optional) temporary matrices. Note that 'a_buffer' is used
+  // when no temporary buffer is needed, but only to make it compile: it is never used then.
+ const auto temp_buffer = (temp_size > 0) ? Buffer<T>(context_, temp_size) : a_buffer;
+
+ // Sets the buffer pointers for (temp) matrices A, B, and C
+ const auto a_temp = (a_no_temp) ? a_buffer : temp_buffer;
+ const auto b_temp = (b_no_temp) ? b_buffer : temp_buffer;
+ const auto c_temp = (c_no_temp) ? c_buffer : temp_buffer;
// Events of all kernels (including pre/post processing kernels)
auto eventWaitList = std::vector<Event>();
@@ -188,7 +202,7 @@ void Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k,
auto eventProcessB = Event();
PadCopyTransposeMatrix(queue_, device_, db_, eventProcessB.pointer(), emptyEventList,
b_one, b_two, b_ld, b_offset, b_buffer,
- b_one_i, b_two_i, b_one_i, 0, b_temp,
+ b_one_i, b_two_i, b_one_i, b_temp_offset, b_temp,
ConstantOne<T>(), program_,
true, b_do_transpose, b_conjugate);
eventWaitList.push_back(eventProcessB);
@@ -199,7 +213,7 @@ void Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k,
auto eventProcessC = Event();
PadCopyTransposeMatrix(queue_, device_, db_, eventProcessC.pointer(), emptyEventList,
c_one, c_two, c_ld, c_offset, c_buffer,
- c_one_i, c_two_i, c_one_i, 0, c_temp,
+ c_one_i, c_two_i, c_one_i, c_temp_offset, c_temp,
ConstantOne<T>(), program_,
true, c_do_transpose, false);
eventWaitList.push_back(eventProcessC);
@@ -217,6 +231,8 @@ void Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k,
kernel.SetArgument(5, a_temp());
kernel.SetArgument(6, b_temp());
kernel.SetArgument(7, c_temp());
+ kernel.SetArgument(8, static_cast<int>(b_temp_offset / db_["VWN"]));
+ kernel.SetArgument(9, static_cast<int>(c_temp_offset / db_["VWM"]));
// Computes the global and local thread sizes
const auto global = std::vector<size_t>{
@@ -234,7 +250,7 @@ void Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k,
if (!c_no_temp) {
eventWaitList.push_back(eventKernel);
PadCopyTransposeMatrix(queue_, device_, db_, event_, eventWaitList,
- c_one_i, c_two_i, c_one_i, 0, c_temp,
+ c_one_i, c_two_i, c_one_i, c_temp_offset, c_temp,
c_one, c_two, c_ld, c_offset, c_buffer,
ConstantOne<T>(), program_,
false, c_do_transpose, false);
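The three per-matrix temporaries are now packed into a single allocation, with B and C placed at
offsets that must stay aligned to the kernel's vector widths. The layout implied by the size and
offset computations above:

    // [ A_temp : a_one_i*a_two_i ][ B_temp : b_one_i*b_two_i ][ C_temp : c_one_i*c_two_i ]
    //   ^ offset 0                 ^ b_temp_offset             ^ c_temp_offset
    //
    // The kernel receives the offsets divided by the vector widths, since bgm and cgm are
    // typed as vectors (realN* and realM*) rather than scalars:
    kernel.SetArgument(8, static_cast<int>(b_temp_offset / db_["VWN"]));
    kernel.SetArgument(9, static_cast<int>(c_temp_offset / db_["VWM"]));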
diff --git a/src/routines/level3/xtrsm.cpp b/src/routines/level3/xtrsm.cpp
index 685d458b..d622e3bf 100644
--- a/src/routines/level3/xtrsm.cpp
+++ b/src/routines/level3/xtrsm.cpp
@@ -73,7 +73,7 @@ void Xtrsm<T>::TrsmColMajor(const Side side, const Triangle triangle,
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld) {
// Settings
- constexpr auto block_size = size_t{32}; // tuneable
+ constexpr auto block_size = size_t{16}; // tuneable
// Makes sure all dimensions are larger than zero
if ((m == 0) || (n == 0)) { throw BLASError(StatusCode::kInvalidDimension); }
@@ -128,18 +128,25 @@ void Xtrsm<T>::TrsmColMajor(const Side side, const Triangle triangle,
for (auto i = size_t{0}; i < m; i += block_size) {
const auto gemm_alpha = (i == 0) ? alpha : ConstantOne<T>();
const auto current_block_size = std::min(m - i, block_size);
- DoGemm(Layout::kColMajor, a_transpose, Transpose::kNo,
- current_block_size, n, current_block_size, gemm_alpha,
- a_inv_buffer, i * block_size, block_size,
- b_buffer, b_offset + i, b_ld, ConstantZero<T>(),
- x_buffer, x_offset + i, x_ld);
+ auto gemm1_event = Event();
+ auto gemm1 = Xgemm<T>(queue_, gemm1_event.pointer());
+ gemm1.DoGemm(Layout::kColMajor, a_transpose, Transpose::kNo,
+ current_block_size, n, current_block_size, gemm_alpha,
+ a_inv_buffer, i * block_size, block_size,
+ b_buffer, b_offset + i, b_ld, ConstantZero<T>(),
+ x_buffer, x_offset + i, x_ld);
+ gemm1_event.WaitForCompletion();
if (i + block_size >= m) { break; }
+
const auto this_a_offset = (a_transpose == Transpose::kNo) ? (i + block_size) + i * a_ld : i + (block_size + i) * a_ld;
- DoGemm(Layout::kColMajor, a_transpose, Transpose::kNo,
- m - i - block_size, n, block_size, ConstantNegOne<T>(),
- a_buffer, this_a_offset, a_ld,
- x_buffer, x_offset + i, x_ld, gemm_alpha,
- b_buffer, b_offset + i + block_size, b_ld);
+ auto gemm2_event = Event();
+ auto gemm2 = Xgemm<T>(queue_, gemm2_event.pointer());
+ gemm2.DoGemm(Layout::kColMajor, a_transpose, Transpose::kNo,
+ m - i - block_size, n, block_size, ConstantNegOne<T>(),
+ a_buffer, this_a_offset + a_offset, a_ld,
+ x_buffer, x_offset + i, x_ld, gemm_alpha,
+ b_buffer, b_offset + i + block_size, b_ld);
+ gemm2_event.WaitForCompletion();
}
}
@@ -150,18 +157,25 @@ void Xtrsm<T>::TrsmColMajor(const Side side, const Triangle triangle,
for (auto i = i_start; i >= 0; i -= static_cast<int>(block_size)) {
const auto current_block_size = (i == i_start) ? special_block_size : block_size;
const auto gemm_alpha = (i == i_start) ? alpha : ConstantOne<T>();
- DoGemm(Layout::kColMajor, a_transpose, Transpose::kNo,
- current_block_size, n, current_block_size, gemm_alpha,
- a_inv_buffer, i * block_size, block_size,
- b_buffer, b_offset + i, b_ld, ConstantZero<T>(),
- x_buffer, x_offset + i, x_ld);
+ auto gemm1_event = Event();
+ auto gemm1 = Xgemm<T>(queue_, gemm1_event.pointer());
+ gemm1.DoGemm(Layout::kColMajor, a_transpose, Transpose::kNo,
+ current_block_size, n, current_block_size, gemm_alpha,
+ a_inv_buffer, i * block_size, block_size,
+ b_buffer, b_offset + i, b_ld, ConstantZero<T>(),
+ x_buffer, x_offset + i, x_ld);
+ gemm1_event.WaitForCompletion();
if (i - static_cast<int>(block_size) < 0) { break; }
+
const auto this_a_offset = (a_transpose == Transpose::kNo) ? i * a_ld : i;
- DoGemm(Layout::kColMajor, a_transpose, Transpose::kNo,
- i, n, current_block_size, ConstantNegOne<T>(),
- a_buffer, this_a_offset, a_ld,
- x_buffer, x_offset + i, x_ld, gemm_alpha,
- b_buffer, b_offset, b_ld);
+ auto gemm2_event = Event();
+ auto gemm2 = Xgemm<T>(queue_, gemm2_event.pointer());
+ gemm2.DoGemm(Layout::kColMajor, a_transpose, Transpose::kNo,
+ i, n, current_block_size, ConstantNegOne<T>(),
+ a_buffer, this_a_offset + a_offset, a_ld,
+ x_buffer, x_offset + i, x_ld, gemm_alpha,
+ b_buffer, b_offset, b_ld);
+ gemm2_event.WaitForCompletion();
}
}
}
@@ -176,18 +190,25 @@ void Xtrsm<T>::TrsmColMajor(const Side side, const Triangle triangle,
for (auto i = i_start; i >= 0; i -= static_cast<int>(block_size)) {
const auto current_block_size = (i == i_start) ? special_block_size : block_size;
const auto gemm_alpha = (i == i_start) ? alpha : ConstantOne<T>();
- DoGemm(Layout::kColMajor, Transpose::kNo, a_transpose,
- m, current_block_size, current_block_size, gemm_alpha,
- b_buffer, b_offset + i * b_ld, b_ld,
- a_inv_buffer, i * block_size, block_size, ConstantZero<T>(),
- x_buffer, x_offset + i * x_ld, x_ld);
+ auto gemm1_event = Event();
+ auto gemm1 = Xgemm<T>(queue_, gemm1_event.pointer());
+ gemm1.DoGemm(Layout::kColMajor, Transpose::kNo, a_transpose,
+ m, current_block_size, current_block_size, gemm_alpha,
+ b_buffer, b_offset + i * b_ld, b_ld,
+ a_inv_buffer, i * block_size, block_size, ConstantZero<T>(),
+ x_buffer, x_offset + i * x_ld, x_ld);
+ gemm1_event.WaitForCompletion();
if (i - static_cast<int>(block_size) < 0) { break; }
+
const auto this_a_offset = (a_transpose == Transpose::kNo) ? i : i * a_ld;
- DoGemm(Layout::kColMajor, Transpose::kNo, a_transpose,
- m, i, current_block_size, ConstantNegOne<T>(),
- x_buffer, x_offset + i * x_ld, x_ld,
- a_buffer, this_a_offset, a_ld, gemm_alpha,
- b_buffer, b_offset, b_ld);
+ auto gemm2_event = Event();
+ auto gemm2 = Xgemm<T>(queue_, gemm2_event.pointer());
+ gemm2.DoGemm(Layout::kColMajor, Transpose::kNo, a_transpose,
+ m, i, current_block_size, ConstantNegOne<T>(),
+ x_buffer, x_offset + i * x_ld, x_ld,
+ a_buffer, this_a_offset + a_offset, a_ld, gemm_alpha,
+ b_buffer, b_offset, b_ld);
+ gemm2_event.WaitForCompletion();
}
}
@@ -196,18 +217,25 @@ void Xtrsm<T>::TrsmColMajor(const Side side, const Triangle triangle,
for (auto i = size_t{0}; i < n; i += block_size) {
const auto gemm_alpha = (i == 0) ? alpha : ConstantOne<T>();
const auto current_block_size = std::min(n - i, block_size);
- DoGemm(Layout::kColMajor, Transpose::kNo, a_transpose,
- m, current_block_size, current_block_size, gemm_alpha,
- b_buffer, b_offset + i * b_ld, b_ld,
- a_inv_buffer, i * block_size, block_size, ConstantZero<T>(),
- x_buffer, x_offset + i * x_ld, x_ld);
+ auto gemm1_event = Event();
+ auto gemm1 = Xgemm<T>(queue_, gemm1_event.pointer());
+ gemm1.DoGemm(Layout::kColMajor, Transpose::kNo, a_transpose,
+ m, current_block_size, current_block_size, gemm_alpha,
+ b_buffer, b_offset + i * b_ld, b_ld,
+ a_inv_buffer, i * block_size, block_size, ConstantZero<T>(),
+ x_buffer, x_offset + i * x_ld, x_ld);
+ gemm1_event.WaitForCompletion();
if (i + block_size >= n) { break; }
+
const auto this_a_offset = (a_transpose == Transpose::kNo) ? i + (block_size + i) * a_ld : (i + block_size) + i * a_ld;
- DoGemm(Layout::kColMajor, Transpose::kNo, a_transpose,
- m, n - i - block_size, block_size, ConstantNegOne<T>(),
- x_buffer, x_offset + i * x_ld, x_ld,
- a_buffer, this_a_offset, a_ld, gemm_alpha,
- b_buffer, b_offset + (i + block_size) * b_ld, b_ld);
+ auto gemm2_event = Event();
+ auto gemm2 = Xgemm<T>(queue_, gemm2_event.pointer());
+ gemm2.DoGemm(Layout::kColMajor, Transpose::kNo, a_transpose,
+ m, n - i - block_size, block_size, ConstantNegOne<T>(),
+ x_buffer, x_offset + i * x_ld, x_ld,
+ a_buffer, this_a_offset + a_offset, a_ld, gemm_alpha,
+ b_buffer, b_offset + (i + block_size) * b_ld, b_ld);
+ gemm2_event.WaitForCompletion();
}
}
}
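Besides the event plumbing and the smaller block size, note that all three TrsmColMajor variants
now add the caller's base offset to the computed sub-matrix offset of A:

    // before: a_buffer, this_a_offset,            a_ld   // user-supplied a_offset was dropped
    // after:  a_buffer, this_a_offset + a_offset, a_ld   // non-zero a_offset is now honoured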
diff --git a/src/routines/levelx/xaxpybatched.cpp b/src/routines/levelx/xaxpybatched.cpp
index 0b755ccf..52c27b78 100644
--- a/src/routines/levelx/xaxpybatched.cpp
+++ b/src/routines/levelx/xaxpybatched.cpp
@@ -59,9 +59,9 @@ void XaxpyBatched<T>::DoAxpyBatched(const size_t n, const std::vector<T> &alphas
x_offsets_int[batch] = static_cast<int>(x_offsets[batch]);
y_offsets_int[batch] = static_cast<int>(y_offsets[batch]);
}
- auto x_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
- auto y_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
- auto alphas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
+ auto x_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+ auto y_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+ auto alphas_device = Buffer<T>(context_, BufferAccess::kReadWrite, batch_count);
x_offsets_device.Write(queue_, batch_count, x_offsets_int);
y_offsets_device.Write(queue_, batch_count, y_offsets_int);
alphas_device.Write(queue_, batch_count, alphas);
diff --git a/src/routines/levelx/xgemmbatched.cpp b/src/routines/levelx/xgemmbatched.cpp
index 4e9f0004..8a015e97 100644
--- a/src/routines/levelx/xgemmbatched.cpp
+++ b/src/routines/levelx/xgemmbatched.cpp
@@ -100,8 +100,8 @@ void XgemmBatched<T>::DoGemmBatched(const Layout layout, const Transpose a_trans
}
// Upload the scalar arguments to the device
- auto alphas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
- auto betas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
+ auto alphas_device = Buffer<T>(context_, BufferAccess::kReadWrite, batch_count);
+ auto betas_device = Buffer<T>(context_, BufferAccess::kReadWrite, batch_count);
alphas_device.Write(queue_, batch_count, alphas);
betas_device.Write(queue_, batch_count, betas);
@@ -200,8 +200,8 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
// to fill it up until it reaches a certain multiple of size (kernel parameter dependent). In
// case nothing has to be done, these kernels can be skipped.
if (!a_no_temp) {
- auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
- auto a_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+ auto a_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
a_offsets_device.Write(queue_, batch_count, a_offsets);
a_offsets_i_device.Write(queue_, batch_count, a_offsets_i);
auto eventProcessA = Event();
@@ -214,8 +214,8 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
// As above, but now for matrix B
if (!b_no_temp) {
- auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
- auto b_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+ auto b_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
b_offsets_device.Write(queue_, batch_count, b_offsets);
b_offsets_i_device.Write(queue_, batch_count, b_offsets_i);
auto eventProcessB = Event();
@@ -227,8 +227,8 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
}
// As above, but now for matrix C
- auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
- auto c_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+ auto c_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
if (!c_no_temp) {
c_offsets_device.Write(queue_, batch_count, c_offsets);
c_offsets_i_device.Write(queue_, batch_count, c_offsets_i);
@@ -297,9 +297,9 @@ void XgemmBatched<T>::BatchedGemmDirect(const size_t m, const size_t n, const si
const size_t batch_count) {
// Uploads the offsets to the device
- auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
- auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
- auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+ auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+ auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
a_offsets_device.Write(queue_, batch_count, a_offsets);
b_offsets_device.Write(queue_, batch_count, b_offsets);
c_offsets_device.Write(queue_, batch_count, c_offsets);
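All host-filled upload buffers in the batched routines (here and in xaxpybatched.cpp above)
switch from kReadOnly to kReadWrite; presumably the new CUDA back-end rejects a host-side
Write() into a buffer declared read-only, whereas OpenCL's read-only flag only restricts kernel
access. The usage pattern is unchanged:

    auto alphas_device = Buffer<T>(context_, BufferAccess::kReadWrite, batch_count);
    alphas_device.Write(queue_, batch_count, alphas);   // filled by the host, read by kernels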
diff --git a/src/routines/routines.hpp b/src/routines/routines.hpp
new file mode 100644
index 00000000..9e7768b9
--- /dev/null
+++ b/src/routines/routines.hpp
@@ -0,0 +1,76 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file contains all the includes of all the routines in CLBlast.
+//
+// =================================================================================================
+
+#ifndef CLBLAST_ROUTINES_ROUTINES_H_
+#define CLBLAST_ROUTINES_ROUTINES_H_
+
+// BLAS level-1 includes
+#include "routines/level1/xswap.hpp"
+#include "routines/level1/xscal.hpp"
+#include "routines/level1/xcopy.hpp"
+#include "routines/level1/xaxpy.hpp"
+#include "routines/level1/xdot.hpp"
+#include "routines/level1/xdotu.hpp"
+#include "routines/level1/xdotc.hpp"
+#include "routines/level1/xnrm2.hpp"
+#include "routines/level1/xasum.hpp"
+#include "routines/level1/xsum.hpp" // non-BLAS routine
+#include "routines/level1/xamax.hpp"
+#include "routines/level1/xamin.hpp" // non-BLAS routine
+#include "routines/level1/xmax.hpp" // non-BLAS routine
+#include "routines/level1/xmin.hpp" // non-BLAS routine
+
+// BLAS level-2 includes
+#include "routines/level2/xgemv.hpp"
+#include "routines/level2/xgbmv.hpp"
+#include "routines/level2/xhemv.hpp"
+#include "routines/level2/xhbmv.hpp"
+#include "routines/level2/xhpmv.hpp"
+#include "routines/level2/xsymv.hpp"
+#include "routines/level2/xsbmv.hpp"
+#include "routines/level2/xspmv.hpp"
+#include "routines/level2/xtrmv.hpp"
+#include "routines/level2/xtbmv.hpp"
+#include "routines/level2/xtpmv.hpp"
+#include "routines/level2/xtrsv.hpp"
+#include "routines/level2/xger.hpp"
+#include "routines/level2/xgeru.hpp"
+#include "routines/level2/xgerc.hpp"
+#include "routines/level2/xher.hpp"
+#include "routines/level2/xhpr.hpp"
+#include "routines/level2/xher2.hpp"
+#include "routines/level2/xhpr2.hpp"
+#include "routines/level2/xsyr.hpp"
+#include "routines/level2/xspr.hpp"
+#include "routines/level2/xsyr2.hpp"
+#include "routines/level2/xspr2.hpp"
+
+// BLAS level-3 includes
+#include "routines/level3/xgemm.hpp"
+#include "routines/level3/xsymm.hpp"
+#include "routines/level3/xhemm.hpp"
+#include "routines/level3/xsyrk.hpp"
+#include "routines/level3/xherk.hpp"
+#include "routines/level3/xsyr2k.hpp"
+#include "routines/level3/xher2k.hpp"
+#include "routines/level3/xtrmm.hpp"
+#include "routines/level3/xtrsm.hpp"
+
+// Level-x includes (non-BLAS)
+#include "routines/levelx/xomatcopy.hpp"
+#include "routines/levelx/xim2col.hpp"
+#include "routines/levelx/xaxpybatched.hpp"
+#include "routines/levelx/xgemmbatched.hpp"
+
+// CLBLAST_ROUTINES_ROUTINES_H_
+#endif
diff --git a/src/tuning/kernels/copy_fast.cpp b/src/tuning/kernels/copy_fast.cpp
index c9bf478c..068c5f1b 100644
--- a/src/tuning/kernels/copy_fast.cpp
+++ b/src/tuning/kernels/copy_fast.cpp
@@ -25,70 +25,64 @@ template <typename T>
class TuneCopy {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return "copy"; }
- static std::string KernelName() { return "CopyMatrixFast"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level3/level3.opencl"
- #include "../src/kernels/level3/copy_fast.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() { return {kArgM, kArgN, kArgAlpha}; }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "copy";
+ settings.kernel_name = "CopyMatrixFast";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level3/level3.opencl"
+#include "../src/kernels/level3/copy_fast.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.m * args.n;
+ settings.size_b = args.m * args.n;
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"COPY_DIMX", "COPY_DIMY"}};
+ settings.div_global = {{"COPY_VW", "COPY_WPT"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"COPY_DIMX", {8, 16, 32}},
+ {"COPY_DIMY", {8, 16, 32}},
+ {"COPY_WPT", {1, 2, 4, 8}},
+ {"COPY_VW", {1, 2, 4, 8}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+ }
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
- // Sets the default values for the arguments
- static size_t DefaultM() { return 1024; }
- static size_t DefaultN() { return 1024; }
- static size_t DefaultK() { return 1; } // N/A for this kernel
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return 1.0; } // N/A for this kernel
- static size_t DefaultNumRuns() { return 10; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; } // N/A for this kernel
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }// N/A for this kernel
- static double DefaultInfluenceLocalPSO(){ return 0.3; }// N/A for this kernel
- static double DefaultInfluenceRandomPSO(){ return 0.6; }// N/A for this kernel
- static size_t DefaultHeuristic(){ return static_cast<size_t> (cltune::SearchMethod::FullSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;}// N/A for this kernel
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeB(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
- tuner.AddParameter(id, "COPY_DIMX", {8, 16, 32});
- tuner.AddParameter(id, "COPY_DIMY", {8, 16, 32});
- tuner.AddParameter(id, "COPY_WPT", {1, 2, 4, 8});
- tuner.AddParameter(id, "COPY_VW", {1, 2, 4, 8});
- }
-
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1, 1}; }
- static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"COPY_DIMX", "COPY_DIMY"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {}; }
- static TransformVector DivGlobal() { return {{"COPY_VW", "COPY_WPT"}}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
@@ -99,17 +93,6 @@ class TuneCopy {
tuner.AddArgumentOutput(b_mat);
tuner.AddArgumentScalar(GetRealArg(args.alpha));
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return 2 * args.m * args.n * GetBytes(args.precision);
- }
- static std::string PerformanceUnit() { return "GB/s"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- return static_cast<size_t> (cltune::SearchMethod::FullSearch);
- }
};
// =================================================================================================
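The tuner refactor replaces the long list of static methods with two plain data structs filled
in per kernel. A hedged sketch of their shape, with field names taken from the uses above and
below (the real definitions live elsewhere in src/tuning and the types are guessed):

    struct TunerDefaults {
      std::vector<std::string> options;                 // relevant command-line arguments
      size_t default_m = 1, default_n = 1;              // default problem sizes
    };
    struct TunerSettings {
      std::string kernel_family, kernel_name;           // identification of the kernel
      std::string sources;                              // concatenated kernel source strings
      size_t size_a = 1, size_b = 1;                    // buffer sizes
      std::vector<size_t> global_size, global_size_ref; // base thread configuration
      std::vector<size_t> local_size, local_size_ref;
      std::vector<std::vector<std::string>> mul_local, div_global;
      std::vector<std::pair<std::string, std::vector<size_t>>> parameters;
      size_t metric_amount = 0;                         // e.g. bytes moved per operation
      std::string performance_unit;                     // e.g. "GB/s"
    };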
diff --git a/src/tuning/kernels/copy_pad.cpp b/src/tuning/kernels/copy_pad.cpp
index 23f52d75..7102d05d 100644
--- a/src/tuning/kernels/copy_pad.cpp
+++ b/src/tuning/kernels/copy_pad.cpp
@@ -25,70 +25,64 @@ template <typename T>
class TunePad {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return "pad"; }
- static std::string KernelName() { return "CopyPadMatrix"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level3/level3.opencl"
- #include "../src/kernels/level3/copy_pad.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() { return {kArgM, kArgN, kArgAlpha}; }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "pad";
+ settings.kernel_name = "CopyPadMatrix";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level3/level3.opencl"
+#include "../src/kernels/level3/copy_pad.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.m * args.n;
+ settings.size_b = args.m * args.n;
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"PAD_DIMX", "PAD_DIMY"}};
+ settings.div_global = {{"PAD_WPTX", "PAD_WPTY"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"PAD_DIMX", {8, 16, 32}},
+ {"PAD_DIMY", {8, 16, 32}},
+ {"PAD_WPTX", {1, 2, 4}},
+ {"PAD_WPTY", {1, 2, 4}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+ }
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
- // Sets the default values for the arguments
- static size_t DefaultM() { return 1024; }
- static size_t DefaultN() { return 1024; }
- static size_t DefaultK() { return 1; } // N/A for this kernel
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return 1.0; } // N/A for this kernel
- static size_t DefaultNumRuns() { return 10; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; } // N/A for this kernel
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }// N/A for this kernel
- static double DefaultInfluenceLocalPSO(){ return 0.3; }// N/A for this kernel
- static double DefaultInfluenceRandomPSO(){ return 0.6; }// N/A for this kernel
- static size_t DefaultHeuristic(){ return static_cast<size_t> (cltune::SearchMethod::FullSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;}// N/A for this kernel
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeB(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
- tuner.AddParameter(id, "PAD_DIMX", {8, 16, 32});
- tuner.AddParameter(id, "PAD_DIMY", {8, 16, 32});
- tuner.AddParameter(id, "PAD_WPTX", {1, 2, 4});
- tuner.AddParameter(id, "PAD_WPTY", {1, 2, 4});
- }
-
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1, 1}; }
- static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"PAD_DIMX", "PAD_DIMY"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {}; }
- static TransformVector DivGlobal() { return {{"PAD_WPTX", "PAD_WPTY"}}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
@@ -107,17 +101,6 @@ class TunePad {
tuner.AddArgumentScalar(GetRealArg(args.alpha));
tuner.AddArgumentScalar(0);
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return 2 * args.m * args.n * GetBytes(args.precision);
- }
- static std::string PerformanceUnit() { return "GB/s"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- return static_cast<size_t> (cltune::SearchMethod::FullSearch);
- }
};
// =================================================================================================
diff --git a/src/tuning/kernels/transpose_fast.cpp b/src/tuning/kernels/transpose_fast.cpp
index 308663d8..56726903 100644
--- a/src/tuning/kernels/transpose_fast.cpp
+++ b/src/tuning/kernels/transpose_fast.cpp
@@ -25,53 +25,60 @@ template <typename T>
class TuneTranspose {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return "transpose"; }
- static std::string KernelName() { return "TransposeMatrixFast"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level3/level3.opencl"
- #include "../src/kernels/level3/transpose_fast.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() { return {kArgM, kArgN, kArgAlpha}; }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "transpose";
+ settings.kernel_name = "TransposeMatrixFast";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level3/level3.opencl"
+#include "../src/kernels/level3/transpose_fast.opencl"
+ ;
- // Tests for valid arguments
- static void TestValidArguments(const Arguments<T> &) { }
+ // Buffer sizes
+ settings.size_a = args.m * args.n;
+ settings.size_b = args.m * args.n;
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"TRA_DIM", "TRA_DIM"}};
+ settings.div_global = {{"TRA_WPT", "TRA_WPT"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"TRA_DIM", {4, 8, 16, 32, 64}},
+ {"TRA_WPT", {1, 2, 4, 8, 16}},
+ {"TRA_PAD", {0, 1}},
+ {"TRA_SHUFFLE", {0, 1}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
- // Sets the default values for the arguments
- static size_t DefaultM() { return 1024; }
- static size_t DefaultN() { return 1024; }
- static size_t DefaultK() { return 1; } // N/A for this kernel
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return 1.0; } // N/A for this kernel
- static size_t DefaultNumRuns() { return 10; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; } // N/A for this kernel
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }// N/A for this kernel
- static double DefaultInfluenceLocalPSO(){ return 0.3; }// N/A for this kernel
- static double DefaultInfluenceRandomPSO(){ return 0.6; }// N/A for this kernel
- static size_t DefaultHeuristic(){ return static_cast<size_t> (cltune::SearchMethod::FullSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;}// N/A for this kernel
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeB(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
- tuner.AddParameter(id, "TRA_DIM", {4, 8, 16, 32, 64});
- tuner.AddParameter(id, "TRA_WPT", {1, 2, 4, 8, 16});
- tuner.AddParameter(id, "TRA_PAD", {0, 1});
- tuner.AddParameter(id, "TRA_SHUFFLE", {0, 1});
+ return settings;
}
+ // Tests for valid arguments
+ static void TestValidArguments(const Arguments<T> &) { }
+
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &tuner, const size_t id, const Arguments<T> &args) {
@@ -81,19 +88,6 @@ class TuneTranspose {
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"TRA_DIM", "TRA_WPT", "TRA_PAD"});
}
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1, 1}; }
- static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"TRA_DIM", "TRA_DIM"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {}; }
- static TransformVector DivGlobal() { return {{"TRA_WPT", "TRA_WPT"}}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
@@ -104,17 +98,6 @@ class TuneTranspose {
tuner.AddArgumentOutput(b_mat);
tuner.AddArgumentScalar(GetRealArg(args.alpha));
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return 2 * args.m * args.n * GetBytes(args.precision);
- }
- static std::string PerformanceUnit() { return "GB/s"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- return static_cast<size_t> (cltune::SearchMethod::FullSearch);
- }
};
// =================================================================================================
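
As a worked example of the bandwidth metric above (a sketch, not part of the diff): 'metric_amount' counts one read and one write of the m*n matrix, and GB/s follows from dividing by the measured runtime. 'BytesPerElement' stands in for CLBlast's GetBytes(precision), and the runtime is hypothetical.

#include <cstdio>

int main() {
  const double m = 1024, n = 1024;          // the defaults above
  const double BytesPerElement = 4.0;       // single precision
  const double metric_amount = 2 * m * n * BytesPerElement; // bytes moved
  const double runtime_ms = 0.05;           // hypothetical measured kernel time
  std::printf("%.1f GB/s\n", metric_amount / (runtime_ms * 1.0e6)); // bytes/ms * 1e-6 = GB/s
}
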
diff --git a/src/tuning/kernels/transpose_pad.cpp b/src/tuning/kernels/transpose_pad.cpp
index 304702de..dc46e903 100644
--- a/src/tuning/kernels/transpose_pad.cpp
+++ b/src/tuning/kernels/transpose_pad.cpp
@@ -25,52 +25,59 @@ template <typename T>
class TunePadTranspose {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return "padtranspose"; }
- static std::string KernelName() { return "TransposePadMatrix"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level3/level3.opencl"
- #include "../src/kernels/level3/transpose_pad.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() { return {kArgM, kArgN, kArgAlpha}; }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "padtranspose";
+ settings.kernel_name = "TransposePadMatrix";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level3/level3.opencl"
+#include "../src/kernels/level3/transpose_pad.opencl"
+ ;
- // Tests for valid arguments
- static void TestValidArguments(const Arguments<T> &) { }
+ // Buffer sizes
+ settings.size_a = args.m * args.n;
+ settings.size_b = args.m * args.n;
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"PADTRA_TILE", "PADTRA_TILE"}};
+ settings.div_global = {{"PADTRA_WPT", "PADTRA_WPT"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"PADTRA_TILE", {8, 16, 32, 64}},
+ {"PADTRA_WPT", {1, 2, 4, 8, 16}},
+ {"PADTRA_PAD", {0, 1}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
- // Sets the default values for the arguments
- static size_t DefaultM() { return 1024; }
- static size_t DefaultN() { return 1024; }
- static size_t DefaultK() { return 1; } // N/A for this kernel
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return 1.0; } // N/A for this kernel
- static size_t DefaultNumRuns() { return 10; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; } // N/A for this kernel
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }// N/A for this kernel
- static double DefaultInfluenceLocalPSO(){ return 0.3; }// N/A for this kernel
- static double DefaultInfluenceRandomPSO(){ return 0.6; }// N/A for this kernel
- static size_t DefaultHeuristic(){ return static_cast<size_t> (cltune::SearchMethod::FullSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;}// N/A for this kernel
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeB(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
- tuner.AddParameter(id, "PADTRA_TILE", {8, 16, 32, 64});
- tuner.AddParameter(id, "PADTRA_WPT", {1, 2, 4, 8, 16});
- tuner.AddParameter(id, "PADTRA_PAD", {0, 1});
+ return settings;
}
+ // Tests for valid arguments
+ static void TestValidArguments(const Arguments<T> &) { }
+
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &tuner, const size_t id, const Arguments<T> &args) {
@@ -80,19 +87,6 @@ class TunePadTranspose {
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"PADTRA_TILE", "PADTRA_WPT", "PADTRA_PAD"});
}
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1, 1}; }
- static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"PADTRA_TILE", "PADTRA_TILE"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {}; }
- static TransformVector DivGlobal() { return {{"PADTRA_WPT", "PADTRA_WPT"}}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
@@ -111,17 +105,6 @@ class TunePadTranspose {
tuner.AddArgumentScalar(GetRealArg(args.alpha));
tuner.AddArgumentScalar(0);
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return 2 * args.m * args.n * GetBytes(args.precision);
- }
- static std::string PerformanceUnit() { return "GB/s"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- return static_cast<size_t> (cltune::SearchMethod::FullSearch);
- }
};
// =================================================================================================
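
The SetLocalMemoryUsage call above follows a callback pattern: given concrete values for the listed parameters, return the bytes of __local memory a configuration needs, so the tuner can reject configurations that exceed the device limit. A standalone sketch, with an illustrative tile formula only (the real expression lives in the kernel sources, not in this hunk):

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  auto LocalMemorySize = [](const std::vector<size_t> &v) -> size_t {
    const size_t tile = v[0], wpt = v[1], pad = v[2]; // PADTRA_TILE, PADTRA_WPT, PADTRA_PAD
    const size_t side = tile * wpt;                   // threads per side times work per thread
    return side * (side + pad) * sizeof(float);       // padded square tile (illustrative)
  };
  const size_t device_limit = 48 * 1024;              // e.g. 48KB of local memory
  const size_t needed = LocalMemorySize({16, 2, 1});
  std::printf("configuration needs %zu bytes: %s\n", needed,
              (needed <= device_limit) ? "accepted" : "rejected");
}
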
diff --git a/src/tuning/kernels/xaxpy.cpp b/src/tuning/kernels/xaxpy.cpp
index f8e1d93e..e201949a 100644
--- a/src/tuning/kernels/xaxpy.cpp
+++ b/src/tuning/kernels/xaxpy.cpp
@@ -25,19 +25,54 @@ template <typename T>
class TuneXaxpy {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return "xaxpy"; }
- static std::string KernelName() { return "XaxpyFastest"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level1/level1.opencl"
- #include "../src/kernels/level1/xaxpy.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgN, kArgAlpha};
+ settings.default_n = 4096*1024;
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() { return {kArgN, kArgAlpha}; }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "xaxpy";
+ settings.kernel_name = "XaxpyFastest";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level1/level1.opencl"
+#include "../src/kernels/level1/xaxpy.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_x = args.n;
+ settings.size_y = args.n;
+
+ // Sets the base thread configuration
+ settings.global_size = {args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1};
+ settings.local_size_ref = {64};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"WGS"}};
+ settings.div_global = {{"WPT"},{"VW"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"WGS", {64, 128, 256, 512, 1024, 2048}},
+ {"WPT", {1, 2, 4, 8}},
+ {"VW", {1, 2, 4, 8}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 3 * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+ }
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &args) {
@@ -46,52 +81,10 @@ class TuneXaxpy {
}
}
- // Sets the default values for the arguments
- static size_t DefaultM() { return 1; } // N/A for this kernel
- static size_t DefaultN() { return 4096*1024; }
- static size_t DefaultK() { return 1; } // N/A for this kernel
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return 1.0; } // N/A for this kernel
- static size_t DefaultNumRuns() { return 10; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; } // N/A for this kernel
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }// N/A for this kernel
- static double DefaultInfluenceLocalPSO(){ return 0.3; }// N/A for this kernel
- static double DefaultInfluenceRandomPSO(){ return 0.6; }// N/A for this kernel
- static size_t DefaultHeuristic(){ return static_cast<size_t> (cltune::SearchMethod::FullSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;} // N/A for this kernel
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
- static size_t GetSizeY(const Arguments<T> &args) { return args.n; }
- static size_t GetSizeA(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeB(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
- tuner.AddParameter(id, "WGS", {64, 128, 256, 512, 1024, 2048});
- tuner.AddParameter(id, "WPT", {1, 2, 4, 8});
- tuner.AddParameter(id, "VW", {1, 2, 4, 8});
- }
-
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.n}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1}; }
- static std::vector<size_t> LocalSizeRef() { return {64}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"WGS"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {}; }
- static TransformVector DivGlobal() { return {{"WPT"},{"VW"}}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &x_vec, std::vector<T> &y_vec,
@@ -102,17 +95,6 @@ class TuneXaxpy {
tuner.AddArgumentInput(x_vec);
tuner.AddArgumentOutput(y_vec);
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return 3 * args.n * GetBytes(args.precision);
- }
- static std::string PerformanceUnit() { return "GB/s"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- return static_cast<size_t> (cltune::SearchMethod::FullSearch);
- }
};
// =================================================================================================
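
A standalone sketch (values arbitrary) of the xaxpy decomposition above: each work-item processes WPT*VW elements (WPT iterations of VW-wide vector accesses), hence the global size is divided by both "WPT" and "VW" while "WGS" scales the local size; the 3*n metric counts reading x, reading y and writing y once each.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t n = 4096 * 1024;          // default_n above
  const size_t WGS = 256, WPT = 2, VW = 4;
  const size_t global = n / (WPT * VW);  // div_global: {"WPT"},{"VW"}
  const size_t groups = global / WGS;    // mul_local: {"WGS"}
  std::printf("%zu work-items in %zu groups of %zu\n", global, groups, WGS);
}
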
diff --git a/src/tuning/kernels/xdot.cpp b/src/tuning/kernels/xdot.cpp
index c3b5361e..fb532680 100644
--- a/src/tuning/kernels/xdot.cpp
+++ b/src/tuning/kernels/xdot.cpp
@@ -26,66 +26,60 @@ template <typename T, int V>
class TuneXdot {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return "xdot_"+std::to_string(V); }
- static std::string KernelName() { return (V==1) ? "Xdot" : "XdotEpilogue"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level1/xdot.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgN};
+ settings.default_n = 2*1024*1024;
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() { return {kArgN}; }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
- // Tests for valid arguments
- static void TestValidArguments(const Arguments<T> &) { }
+ // Identification of the kernel
+ settings.kernel_family = "xdot_"+std::to_string(V);
+ settings.kernel_name = (V==1) ? "Xdot" : "XdotEpilogue";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level1/xdot.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_x = args.n;
+ settings.size_y = args.n;
+ settings.size_temp = args.n; // Worst case
+
+ // Sets the base thread configuration
+ settings.global_size = (V==1) ? std::vector<size_t>{2*64} : std::vector<size_t>{1};
+ settings.global_size_ref = (V==1) ? std::vector<size_t>{2*64*64} : std::vector<size_t>{64};
+ settings.local_size = {1};
+ settings.local_size_ref = {64};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = (V==1) ? TunerSettings::TransformVector{{"WGS1"}} : TunerSettings::TransformVector{{"WGS2"}};
+ settings.mul_global = (V==1) ? TunerSettings::TransformVector{{"WGS1"}} : TunerSettings::TransformVector{{"WGS2"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"WGS"+std::to_string(V), {32, 64, 128, 256, 512, 1024}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = (V==1) ? (2*args.n + 1) * GetBytes(args.precision) : 1 * GetBytes(args.precision);
+ settings.performance_unit = (V==1) ? "GB/s" : "N/A";
- // Sets the default values for the arguments
- static size_t DefaultM() { return 1; } // N/A for this kernel
- static size_t DefaultN() { return 2*1024*1024; }
- static size_t DefaultK() { return 1; } // N/A for this kernel
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return 1.0; } // N/A for this kernel
- static size_t DefaultNumRuns() { return 10; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; } // N/A for this kernel
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }// N/A for this kernel
- static double DefaultInfluenceLocalPSO(){ return 0.3; }// N/A for this kernel
- static double DefaultInfluenceRandomPSO(){ return 0.6; }// N/A for this kernel
- static size_t DefaultHeuristic(){ return static_cast<size_t> (cltune::SearchMethod::FullSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;}// N/A for this kernel
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
- static size_t GetSizeY(const Arguments<T> &args) { return args.n; }
- static size_t GetSizeA(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeB(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeTemp(const Arguments<T> &args) { return args.n; } // Worst case
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
- tuner.AddParameter(id, "WGS"+std::to_string(V), {32, 64, 128, 256, 512, 1024});
+ return settings;
}
+ // Tests for valid arguments
+ static void TestValidArguments(const Arguments<T> &) { }
+
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &) { return (V==1) ? std::vector<size_t>{2*64} : std::vector<size_t>{1}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &) { return (V==1) ? std::vector<size_t>{2*64*64} : std::vector<size_t>{64}; }
- static std::vector<size_t> LocalSize() { return {1}; }
- static std::vector<size_t> LocalSizeRef() { return {64}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return (V==1) ? TransformVector{{"WGS1"}} : TransformVector{{"WGS2"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return (V==1) ? TransformVector{{"WGS1"}} : TransformVector{{"WGS2"}}; }
- static TransformVector DivGlobal() { return {}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &x_vec, std::vector<T> &y_vec,
@@ -108,17 +102,6 @@ class TuneXdot {
tuner.AddArgumentScalar(0);
}
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return (V==1) ? (2*args.n + 1) * GetBytes(args.precision) : 1 * GetBytes(args.precision);
- }
- static std::string PerformanceUnit() { return (V==1) ? "GB/s" : "N/A"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- return static_cast<size_t> (cltune::SearchMethod::FullSearch);
- }
};
// =================================================================================================
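
A sketch of the two-stage launch implied by the Xdot settings above (candidate values arbitrary): stage one (V==1) scales both the {2*64} global size and the {1} local size by WGS1, giving a fixed 128 work-groups that each emit one partial sum into the temp buffer; stage two (V==2) reduces those partials with a single work-group of WGS2 threads.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t WGS1 = 128, WGS2 = 64;                // one candidate value per stage
  const size_t stage1_global = 2 * 64 * WGS1;        // mul_global: {"WGS1"}
  const size_t stage1_groups = stage1_global / WGS1; // 128 partial results
  const size_t stage2_global = 1 * WGS2;             // mul_global: {"WGS2"}
  std::printf("stage 1: %zu groups; stage 2: %zu threads reduce %zu partials\n",
              stage1_groups, stage2_global, stage1_groups);
}
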
diff --git a/src/tuning/kernels/xgemm.cpp b/src/tuning/kernels/xgemm.cpp
index fa6b3085..6dcdf68b 100644
--- a/src/tuning/kernels/xgemm.cpp
+++ b/src/tuning/kernels/xgemm.cpp
@@ -27,88 +27,111 @@ template <typename T, int V>
class TuneXgemm {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return (V==1) ? "xgemm_1" : "xgemm_2"; }
- static std::string KernelName() { return "Xgemm"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level3/xgemm_part1.opencl"
- #include "../src/kernels/level3/xgemm_part2.opencl"
- #include "../src/kernels/level3/xgemm_part3.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction,
+ kArgHeuristicSelection, kArgPsoSwarmSize,
+ kArgPsoInfGlobal, kArgPsoInfLocal, kArgPsoInfRandom};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ settings.default_k = 1024;
+ settings.default_fraction = (V==1) ? 1.0 : 512.0; // test all or sample randomly
+ settings.default_num_runs = 2;
+ settings.default_heuristic = static_cast<size_t>(cltune::SearchMethod::RandomSearch);
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() {
- return {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction,
- kArgHeuristicSelection, kArgPsoSwarmSize,
- kArgPsoInfGlobal, kArgPsoInfLocal, kArgPsoInfRandom};
- }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = (V==1) ? "xgemm_1" : "xgemm_2";
+ settings.kernel_name = "Xgemm";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level3/xgemm_part1.opencl"
+#include "../src/kernels/level3/xgemm_part2.opencl"
+#include "../src/kernels/level3/xgemm_part3.opencl"
+ ;
- // Tests for valid arguments
- static void TestValidArguments(const Arguments<T> &) { }
+ // Buffer sizes
+ settings.size_a = args.m * args.k;
+ settings.size_b = args.n * args.k;
+ settings.size_c = args.m * args.n;
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
- // Sets the default values for the arguments
- static size_t DefaultM() { return 1024; }
- static size_t DefaultN() { return 1024; }
- static size_t DefaultK() { return 1024; }
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return (V==1) ? 1.0 : 512.0; } // test all or sample randomly
- static size_t DefaultNumRuns() { return 2; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; }
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }
- static double DefaultInfluenceLocalPSO(){ return 0.3; }
- static double DefaultInfluenceRandomPSO(){ return 0.6; }
- static size_t DefaultHeuristic(){ return static_cast<size_t>(cltune::SearchMethod::RandomSearch); }
- static double DefaultMaxTempAnn(){ return 1.0;}
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.k; }
- static size_t GetSizeB(const Arguments<T> &args) { return args.n * args.k; }
- static size_t GetSizeC(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"MDIMC", "NDIMC"}};
+ settings.mul_global = {{"MDIMC", "NDIMC"}};
+ settings.div_global = {{"MWG", "NWG"}};
+
+ // Sets the tuning parameters and their possible values
if (V==1) { // limited subset of tuning parameters - but explorable exhaustively
- tuner.AddParameter(id, "MWG", {16, 32, 64});
- tuner.AddParameter(id, "NWG", {16, 32, 64});
- tuner.AddParameter(id, "KWG", {32});
- tuner.AddParameter(id, "MDIMC", {8, 16, 32});
- tuner.AddParameter(id, "NDIMC", {8, 16, 32});
- tuner.AddParameter(id, "MDIMA", {8, 16, 32});
- tuner.AddParameter(id, "NDIMB", {8, 16, 32});
- tuner.AddParameter(id, "KWI", {2});
- tuner.AddParameter(id, "VWM", {1, 2, 4});
- tuner.AddParameter(id, "VWN", {1, 2, 4});
- tuner.AddParameter(id, "STRM", {0});
- tuner.AddParameter(id, "STRN", {0});
- tuner.AddParameter(id, "SA", {0, 1});
- tuner.AddParameter(id, "SB", {0, 1});
- } // a lot more tuning parameters - has to be sampled randomly, too much to test all
+ settings.parameters = {
+ {"MWG", {16, 32, 64}},
+ {"NWG", {16, 32, 64}},
+ {"KWG", {32}},
+ {"MDIMC", {8, 16, 32}},
+ {"NDIMC", {8, 16, 32}},
+ {"MDIMA", {8, 16, 32}},
+ {"NDIMB", {8, 16, 32}},
+ {"KWI", {2}},
+ {"VWM", {1, 2, 4}},
+ {"VWN", {1, 2, 4}},
+ {"STRM", {0}},
+ {"STRN", {0}},
+ {"SA", {0, 1}},
+ {"SB", {0, 1}},
+ };
+ }
+ else { // a lot more tuning parameters - has to be sampled randomly, too much to test all
+ settings.parameters = {
+ {"MWG", {16, 32, 64, 128}},
+ {"NWG", {16, 32, 64, 128}},
+ {"KWG", {16, 32}},
+ {"MDIMC", {8, 16, 32}},
+ {"NDIMC", {8, 16, 32}},
+ {"MDIMA", {8, 16, 32}},
+ {"NDIMB", {8, 16, 32}},
+ {"KWI", {2}},
+ {"VWM", {1, 2, 4, 8}},
+ {"VWN", {1, 2, 4, 8}},
+ {"STRM", {0, 1}},
+ {"STRN", {0, 1}},
+ {"SA", {0, 1}},
+ {"SB", {0, 1}},
+ };
+ }
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * args.k;
+ settings.performance_unit = "GFLOPS";
+
+ // Returns which search heuristic to use
+ if (V==1) { settings.heuristic = static_cast<size_t>(cltune::SearchMethod::FullSearch); }
else {
- //RANDOM_SEARCH & PSO
- tuner.AddParameter(id, "MWG", {16, 32, 64, 128});
- tuner.AddParameter(id, "NWG", {16, 32, 64, 128});
- tuner.AddParameter(id, "KWG", {16, 32});
- tuner.AddParameter(id, "MDIMC", {8, 16, 32});
- tuner.AddParameter(id, "NDIMC", {8, 16, 32});
- tuner.AddParameter(id, "MDIMA", {8, 16, 32});
- tuner.AddParameter(id, "NDIMB", {8, 16, 32});
- tuner.AddParameter(id, "KWI", {2});
- tuner.AddParameter(id, "VWM", {1, 2, 4, 8});
- tuner.AddParameter(id, "VWN", {1, 2, 4, 8});
- tuner.AddParameter(id, "STRM", {0, 1});
- tuner.AddParameter(id, "STRN", {0, 1});
- tuner.AddParameter(id, "SA", {0, 1});
- tuner.AddParameter(id, "SB", {0, 1});
+ // Use full-search to explore all parameter combinations or another strategy to search only a
+ // part of the parameter values. The fraction is set as a command-line argument.
+ if (args.fraction == 1.0 || args.fraction == 0.0) {
+ settings.heuristic = static_cast<size_t>(cltune::SearchMethod::FullSearch);
+ } else {
+ settings.heuristic = args.heuristic_selection;
+ }
}
+ return settings;
}
+ // Tests for valid arguments
+ static void TestValidArguments(const Arguments<T> &) { }
+
// Sets the constraints
static void SetConstraints(cltune::Tuner &tuner, const size_t id) {
auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
@@ -144,19 +167,6 @@ class TuneXgemm {
"SB", "KWG", "NWG"});
}
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1, 1}; }
- static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"MDIMC", "NDIMC"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {{"MDIMC", "NDIMC"}}; }
- static TransformVector DivGlobal() { return {{"MWG", "NWG"}}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
@@ -170,27 +180,9 @@ class TuneXgemm {
tuner.AddArgumentInput(a_mat);
tuner.AddArgumentInput(b_mat);
tuner.AddArgumentOutput(c_mat);
+ tuner.AddArgumentScalar(0);
+ tuner.AddArgumentScalar(0);
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return 2 * args.m * args.n * args.k;
- }
- static std::string PerformanceUnit() { return "GFLOPS"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- if (V==1) { return static_cast<size_t>(cltune::SearchMethod::FullSearch); }
- else {
- // Use full-search to explore all parameter combinations or another strategy to search only a
- // part of the parameter values. The fraction is set as a command-line argument.
- if (args.fraction == 1.0 || args.fraction == 0.0) {
- return static_cast<size_t>(cltune::SearchMethod::FullSearch);
- } else {
- return args.heuristic_selection;
- }
- }
- }
};
// =================================================================================================
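
The heuristic choice above can be read as a small decision function. A standalone sketch; the SearchMethod enum is a stand-in for cltune::SearchMethod and its numeric values are illustrative:

#include <cstddef>
#include <cstdio>

enum class SearchMethod : size_t { FullSearch = 0, RandomSearch = 1, PSO = 2, Annealing = 3 };

size_t ChooseHeuristic(const int V, const double fraction, const size_t heuristic_selection) {
  if (V == 1) { return static_cast<size_t>(SearchMethod::FullSearch); } // exhaustive subset
  if (fraction == 1.0 || fraction == 0.0) {      // nothing to sample: search everything
    return static_cast<size_t>(SearchMethod::FullSearch);
  }
  return heuristic_selection;                    // otherwise: the command-line choice
}

int main() {
  std::printf("%zu %zu %zu\n",
              ChooseHeuristic(1, 512.0, 1),   // full search
              ChooseHeuristic(2, 1.0, 1),     // full search
              ChooseHeuristic(2, 512.0, 1));  // the selected heuristic
}
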
diff --git a/src/tuning/kernels/xgemm_direct.cpp b/src/tuning/kernels/xgemm_direct.cpp
index 03b40a50..619fb37a 100644
--- a/src/tuning/kernels/xgemm_direct.cpp
+++ b/src/tuning/kernels/xgemm_direct.cpp
@@ -27,78 +27,103 @@ template <typename T, int V>
class TuneXgemmDirect {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return (V==1) ? "xgemm_direct_1" : "xgemm_direct_2"; }
- static std::string KernelName() { return "XgemmDirectTN"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level3/xgemm_direct_part1.opencl"
- #include "../src/kernels/level3/xgemm_direct_part2.opencl"
- #include "../src/kernels/level3/xgemm_direct_part3.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction,
+ kArgHeuristicSelection, kArgPsoSwarmSize,
+ kArgPsoInfGlobal, kArgPsoInfLocal, kArgPsoInfRandom};
+ settings.default_m = 256;
+ settings.default_n = 256;
+ settings.default_k = 256;
+ settings.default_fraction = (V==1) ? 1.0 : 32.0; // test all or sample randomly
+ settings.default_num_runs = 4;
+ settings.default_heuristic = static_cast<size_t>(cltune::SearchMethod::RandomSearch);
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() {
- return {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction,
- kArgHeuristicSelection, kArgPsoSwarmSize,
- kArgPsoInfGlobal, kArgPsoInfLocal, kArgPsoInfRandom};
- }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = (V==1) ? "xgemm_direct_1" : "xgemm_direct_2";
+ settings.kernel_name = "XgemmDirectTN";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level3/xgemm_direct_part1.opencl"
+#include "../src/kernels/level3/xgemm_direct_part2.opencl"
+#include "../src/kernels/level3/xgemm_direct_part3.opencl"
+ ;
- // Tests for valid arguments
- static void TestValidArguments(const Arguments<T> &) { }
+ // Buffer sizes
+ settings.size_a = args.m * args.k;
+ settings.size_b = args.n * args.k;
+ settings.size_c = args.m * args.n;
- // Sets the default values for the arguments
- static size_t DefaultM() { return 256; }
- static size_t DefaultN() { return 256; }
- static size_t DefaultK() { return 256; }
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return (V==1) ? 1.0 : 32.0; } // test all or sample randomly
- static size_t DefaultNumRuns() { return 4; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; }
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }
- static double DefaultInfluenceLocalPSO(){ return 0.3; }
- static double DefaultInfluenceRandomPSO(){ return 0.6; }
- static size_t DefaultHeuristic(){ return static_cast<size_t>(cltune::SearchMethod::RandomSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;}
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeY(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.k; }
- static size_t GetSizeB(const Arguments<T> &args) { return args.n * args.k; }
- static size_t GetSizeC(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"MDIMCD", "NDIMCD"}};
+ settings.mul_global = {{"MDIMCD", "NDIMCD"}};
+ settings.div_global = {{"WGD", "WGD"}};
+
+ // Sets the tuning parameters and their possible values
if (V==1) { // limited subset of tuning parameters - but explorable exhaustively
- tuner.AddParameter(id, "WGD", {8, 16, 32});
- tuner.AddParameter(id, "MDIMCD", {8, 16, 32});
- tuner.AddParameter(id, "NDIMCD", {8, 16, 32});
- tuner.AddParameter(id, "MDIMAD", {8, 16, 32});
- tuner.AddParameter(id, "NDIMBD", {8, 16, 32});
- tuner.AddParameter(id, "KWID", {2});
- tuner.AddParameter(id, "VWMD", {1, 2, 4, 8});
- tuner.AddParameter(id, "VWND", {1, 2, 4, 8});
- tuner.AddParameter(id, "PADA", {1});
- tuner.AddParameter(id, "PADB", {1});
- } // a lot more tuning parameters - has to be sampled randomly, too much to test all
+ settings.parameters = {
+ {"WGD", {8, 16, 32}},
+ {"MDIMCD", {8, 16, 32}},
+ {"NDIMCD", {8, 16, 32}},
+ {"MDIMAD", {8, 16, 32}},
+ {"NDIMBD", {8, 16, 32}},
+ {"KWID", {2}},
+ {"VWMD", {1, 2, 4, 8}},
+ {"VWND", {1, 2, 4, 8}},
+ {"PADA", {1}},
+ {"PADB", {1}},
+ };
+ }
+ else { // a lot more tuning parameters - has to be sampled randomly, too much to test all
+ settings.parameters = {
+ {"WGD", {8, 16, 32, 64, 128}},
+ {"MDIMCD", {8, 16, 32}},
+ {"NDIMCD", {8, 16, 32}},
+ {"MDIMAD", {8, 16, 32}},
+ {"NDIMBD", {8, 16, 32}},
+ {"KWID", {2, 8, 16}},
+ {"VWMD", {1, 2, 4, 8}},
+ {"VWND", {1, 2, 4, 8}},
+ {"PADA", {0, 1}},
+ {"PADB", {0, 1}},
+ };
+ }
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * args.k;
+ settings.performance_unit = "GFLOPS";
+
+ // Returns which search heuristic to use
+ if (V==1) { settings.heuristic = static_cast<size_t>(cltune::SearchMethod::FullSearch); }
else {
- tuner.AddParameter(id, "WGD", {8, 16, 32, 64, 128});
- tuner.AddParameter(id, "MDIMCD", {8, 16, 32});
- tuner.AddParameter(id, "NDIMCD", {8, 16, 32});
- tuner.AddParameter(id, "MDIMAD", {8, 16, 32});
- tuner.AddParameter(id, "NDIMBD", {8, 16, 32});
- tuner.AddParameter(id, "KWID", {2, 8, 16});
- tuner.AddParameter(id, "VWMD", {1, 2, 4, 8});
- tuner.AddParameter(id, "VWND", {1, 2, 4, 8});
- tuner.AddParameter(id, "PADA", {0, 1});
- tuner.AddParameter(id, "PADB", {0, 1});
+ // Use full-search to explore all parameter combinations or another strategy to search only a
+ // part of the parameter values. The fraction is set as a command-line argument.
+ if (args.fraction == 1.0 || args.fraction == 0.0) {
+ settings.heuristic = static_cast<size_t>(cltune::SearchMethod::FullSearch);
+ } else {
+ settings.heuristic = args.heuristic_selection;
+ }
}
+
+ return settings;
}
+ // Tests for valid arguments
+ static void TestValidArguments(const Arguments<T> &) { }
+
// Sets the constraints
static void SetConstraints(cltune::Tuner &tuner, const size_t id) {
auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
@@ -132,19 +157,6 @@ class TuneXgemmDirect {
tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"WGD", "PADA", "PADB"});
}
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1, 1}; }
- static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"MDIMCD", "NDIMCD"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {{"MDIMCD", "NDIMCD"}}; }
- static TransformVector DivGlobal() { return {{"WGD", "WGD"}}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &, std::vector<T> &,
@@ -168,26 +180,6 @@ class TuneXgemmDirect {
tuner.AddArgumentScalar(0); // a_conjugate
tuner.AddArgumentScalar(0); // b_conjugate
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return 2 * args.m * args.n * args.k;
- }
- static std::string PerformanceUnit() { return "GFLOPS"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- if (V==1) { return static_cast<size_t>(cltune::SearchMethod::FullSearch); }
- else {
- // Use full-search to explore all parameter combinations or another strategy to search only a
- // part of the parameter values. The fraction is set as a command-line argument.
- if (args.fraction == 1.0 || args.fraction == 0.0) {
- return static_cast<size_t>(cltune::SearchMethod::FullSearch);
- } else {
- return args.heuristic_selection;
- }
- }
- }
};
// =================================================================================================
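
A worked example of the GFLOPS metric above (a sketch; the runtime is hypothetical): multiplying an m-by-k matrix with a k-by-n matrix performs m*n*k multiply-add pairs, i.e. 2*m*n*k floating-point operations.

#include <cstdio>

int main() {
  const double m = 256, n = 256, k = 256;   // xgemm_direct defaults above
  const double metric_amount = 2 * m * n * k;
  const double runtime_ms = 0.9;            // hypothetical measured kernel time
  std::printf("%.1f GFLOPS\n", metric_amount / (runtime_ms * 1.0e6)); // flops/ms * 1e-6
}
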
diff --git a/src/tuning/kernels/xgemv.cpp b/src/tuning/kernels/xgemv.cpp
index 00115b6c..e66b15f1 100644
--- a/src/tuning/kernels/xgemv.cpp
+++ b/src/tuning/kernels/xgemv.cpp
@@ -28,63 +28,77 @@ template <typename T, int V>
class TuneXgemv {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return (V==1) ? "xgemv" : ((V==2) ? "xgemv_fast" : "xgemv_fast_rot"); }
- static std::string KernelName() { return (V==1) ? "Xgemv" : ((V==2) ? "XgemvFast" : "XgemvFastRot"); }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level2/xgemv.opencl"
- #include "../src/kernels/level2/xgemv_fast.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha, kArgBeta};
+ settings.default_m = 2048;
+ settings.default_n = 2048;
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() { return {kArgM, kArgN, kArgAlpha, kArgBeta}; }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = (V==1) ? "xgemv" : ((V==2) ? "xgemv_fast" : "xgemv_fast_rot");
+ settings.kernel_name = (V==1) ? "Xgemv" : ((V==2) ? "XgemvFast" : "XgemvFastRot");
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level2/xgemv.opencl"
+#include "../src/kernels/level2/xgemv_fast.opencl"
+ ;
- // Tests for valid arguments
- static void TestValidArguments(const Arguments<T> &) { }
+ // Buffer sizes
+ settings.size_x = args.n;
+ settings.size_y = args.m;
+ settings.size_a = args.m * args.n;
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1};
+ settings.local_size_ref = {64};
- // Sets the default values for the arguments
- static size_t DefaultM() { return 2048; }
- static size_t DefaultN() { return 2048; }
- static size_t DefaultK() { return 1; } // N/A for this kernel
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return 1.0; } // N/A for this kernel
- static size_t DefaultNumRuns() { return 10; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; } // N/A for this kernel
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }// N/A for this kernel
- static double DefaultInfluenceLocalPSO(){ return 0.3; }// N/A for this kernel
- static double DefaultInfluenceRandomPSO(){ return 0.6; }// N/A for this kernel
- static size_t DefaultHeuristic(){ return static_cast<size_t> (cltune::SearchMethod::FullSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;}// N/A for this kernel
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &args) { return args.n; }
- static size_t GetSizeY(const Arguments<T> &args) { return args.m; }
- static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeB(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"WGS"+std::to_string(V)}};
+ settings.div_global = (V==1 || V==2) ?
+ TunerSettings::TransformVector{{"WPT"+std::to_string(V)}} :
+ TunerSettings::TransformVector{};
+
+ // Sets the tuning parameters and their possible values
if (V==1) {
- tuner.AddParameter(id, "WGS"+std::to_string(V), {32, 64, 128, 256});
- tuner.AddParameter(id, "WPT"+std::to_string(V), {1, 2, 4});
+ settings.parameters = {
+ {"WGS"+std::to_string(V), {32, 64, 128, 256}},
+ {"WPT"+std::to_string(V), {1, 2, 4}},
+ };
}
if (V==2) {
- tuner.AddParameter(id, "WGS"+std::to_string(V), {16, 32, 64, 128, 256});
- tuner.AddParameter(id, "WPT"+std::to_string(V), {1, 2, 4});
- tuner.AddParameter(id, "VW"+std::to_string(V), {1, 2, 4, 8});
+ settings.parameters = {
+ {"WGS"+std::to_string(V), {16, 32, 64, 128, 256}},
+ {"WPT"+std::to_string(V), {1, 2, 4}},
+ {"VW"+std::to_string(V), {1, 2, 4, 8}},
+ };
}
if (V==3) {
- tuner.AddParameter(id, "WGS"+std::to_string(V), {16, 32, 64, 128});
- tuner.AddParameter(id, "WPT"+std::to_string(V), {1, 2, 4, 8, 16, 32});
- tuner.AddParameter(id, "VW"+std::to_string(V), {1, 2, 4, 8});
+ settings.parameters = {
+ {"WGS"+std::to_string(V), {16, 32, 64, 128}},
+ {"WPT"+std::to_string(V), {1, 2, 4, 8, 16, 32}},
+ {"VW"+std::to_string(V), {1, 2, 4, 8}},
+ };
}
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = (args.m*args.n + 2*args.m + args.n) * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
}
+ // Tests for valid arguments
+ static void TestValidArguments(const Arguments<T> &) { }
+
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &tuner, const size_t id) {
if (V==2 || V==3) {
@@ -107,22 +121,6 @@ class TuneXgemv {
}
}
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1}; }
- static std::vector<size_t> LocalSizeRef() { return {64}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"WGS"+std::to_string(V)}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {}; }
- static TransformVector DivGlobal() {
- if (V==1 || V==2) return {{"WPT"+std::to_string(V)}};
- return {};
- }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &x_vec, std::vector<T> &y_vec,
@@ -148,17 +146,6 @@ class TuneXgemv {
tuner.AddArgumentScalar(0); // Banded 'kl'
tuner.AddArgumentScalar(0); // Banded 'ku'
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return (args.m*args.n + 2*args.m + args.n) * GetBytes(args.precision);
- }
- static std::string PerformanceUnit() { return "GB/s"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- return static_cast<size_t> (cltune::SearchMethod::FullSearch);
- }
};
// =================================================================================================
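
A standalone sketch of the V-suffixed naming scheme used by the three xgemv variants above: one class template serves Xgemv (V==1), XgemvFast (V==2) and XgemvFastRot (V==3) by composing parameter names such as "WGS1", "WGS2" and "WGS3" from the template argument; the vector-width parameter exists only for the two fast variants. The helper below is hypothetical.

#include <cstdio>
#include <string>
#include <vector>

template <int V>
std::vector<std::string> ParameterNames() {
  std::vector<std::string> names = {"WGS" + std::to_string(V), "WPT" + std::to_string(V)};
  if (V == 2 || V == 3) { names.push_back("VW" + std::to_string(V)); }
  return names;
}

int main() {
  for (const auto &name : ParameterNames<3>()) { std::printf("%s\n", name.c_str()); }
}
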
diff --git a/src/tuning/kernels/xger.cpp b/src/tuning/kernels/xger.cpp
index 14a98761..c2eb1d31 100644
--- a/src/tuning/kernels/xger.cpp
+++ b/src/tuning/kernels/xger.cpp
@@ -25,69 +25,64 @@ template <typename T>
class TuneXger {
public:
- // The representative kernel and the source code
- static std::string KernelFamily() { return "xger"; }
- static std::string KernelName() { return "Xger"; }
- static std::string GetSources() {
- return
- #include "../src/kernels/common.opencl"
- #include "../src/kernels/level2/level2.opencl"
- #include "../src/kernels/level2/xger.opencl"
- ;
+ // Settings for this kernel (default command-line arguments)
+ static TunerDefaults GetTunerDefaults() {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
}
- // The list of arguments relevant for this routine
- static std::vector<std::string> GetOptions() { return {kArgN, kArgM, kArgAlpha}; }
+ // Settings for this kernel (general)
+ static TunerSettings GetTunerSettings(const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "xger";
+ settings.kernel_name = "Xger";
+ settings.sources =
+#include "../src/kernels/common.opencl"
+#include "../src/kernels/level2/level2.opencl"
+#include "../src/kernels/level2/xger.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_x = args.m;
+ settings.size_y = args.n;
+ settings.size_a = args.m * args.n;
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"WGS1", "WGS2"}};
+ settings.div_global = {{"WPT", "WPT"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"WGS1", {4, 8, 16, 32, 64, 128, 256, 512}},
+ {"WGS2", {1, 2, 4, 8, 16, 32, 64, 128, 256}},
+ {"WPT", {1, 2, 4}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = (2*args.m*args.n + args.m + args.n) * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+ }
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
- // Sets the default values for the arguments
- static size_t DefaultM() { return 1024; }
- static size_t DefaultN() { return 1024; }
- static size_t DefaultK() { return 1; } // N/A for this kernel
- static size_t DefaultBatchCount() { return 1; } // N/A for this kernel
- static double DefaultFraction() { return 1.0; } // N/A for this kernel
- static size_t DefaultNumRuns() { return 10; } // run every kernel this many times for averaging
- static size_t DefaultSwarmSizePSO() { return 8; } // N/A for this kernel
- static double DefaultInfluenceGlobalPSO(){ return 0.1; }// N/A for this kernel
- static double DefaultInfluenceLocalPSO(){ return 0.3; } // N/A for this kernel
- static double DefaultInfluenceRandomPSO(){ return 0.6; }// N/A for this kernel
- static size_t DefaultHeuristic(){ return static_cast<size_t> (cltune::SearchMethod::FullSearch);}
- static double DefaultMaxTempAnn(){ return 1.0;}// N/A for this kernel
-
- // Describes how to obtain the sizes of the buffers
- static size_t GetSizeX(const Arguments<T> &args) { return args.m; }
- static size_t GetSizeY(const Arguments<T> &args) { return args.n; }
- static size_t GetSizeA(const Arguments<T> &args) { return args.m * args.n; }
- static size_t GetSizeB(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeC(const Arguments<T> &) { return 1; } // N/A for this kernel
- static size_t GetSizeTemp(const Arguments<T> &) { return 1; } // N/A for this kernel
-
- // Sets the tuning parameters and their possible values
- static void SetParameters(cltune::Tuner &tuner, const size_t id) {
- tuner.AddParameter(id, "WGS1", {4, 8, 16, 32, 64, 128, 256, 512});
- tuner.AddParameter(id, "WGS2", {1, 2, 4, 8, 16, 32, 64, 128, 256});
- tuner.AddParameter(id, "WPT", {1, 2, 4});
- }
-
// Sets the constraints and local memory size
static void SetConstraints(cltune::Tuner &, const size_t) { }
static void SetLocalMemorySize(cltune::Tuner &, const size_t, const Arguments<T> &) { }
- // Sets the base thread configuration
- static std::vector<size_t> GlobalSize(const Arguments<T> &args) { return {args.m, args.n}; }
- static std::vector<size_t> GlobalSizeRef(const Arguments<T> &args) { return GlobalSize(args); }
- static std::vector<size_t> LocalSize() { return {1, 1}; }
- static std::vector<size_t> LocalSizeRef() { return {8, 8}; }
-
- // Transforms the thread configuration based on the parameters
- using TransformVector = std::vector<std::vector<std::string>>;
- static TransformVector MulLocal() { return {{"WGS1", "WGS2"}}; }
- static TransformVector DivLocal() { return {}; }
- static TransformVector MulGlobal() { return {}; }
- static TransformVector DivGlobal() { return {{"WPT", "WPT"}}; }
-
// Sets the kernel's arguments
static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
std::vector<T> &x_vec, std::vector<T> &y_vec,
@@ -107,17 +102,6 @@ class TuneXger {
tuner.AddArgumentScalar(static_cast<int>(args.m)); // a_ld
tuner.AddArgumentScalar(0); // a_is_rowmajor
}
-
- // Describes how to compute the performance metrics
- static size_t GetMetric(const Arguments<T> &args) {
- return (2*args.m*args.n + args.m + args.n) * GetBytes(args.precision);
- }
- static std::string PerformanceUnit() { return "GB/s"; }
-
- // Returns which Heuristic to run
- static size_t GetHeuristic(const Arguments<T> &args){
- return static_cast<size_t> (cltune::SearchMethod::FullSearch);
- }
};
// =================================================================================================
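
A worked example of the Xger bandwidth metric above (a sketch; the runtime is hypothetical): the rank-1 update A += alpha * x * y^T reads and writes the m*n matrix once each (2*m*n elements) and reads the two vectors (m + n elements).

#include <cstdio>

int main() {
  const double m = 1024, n = 1024;        // the defaults above
  const double BytesPerElement = 4.0;     // stand-in for GetBytes(precision)
  const double metric_amount = (2 * m * n + m + n) * BytesPerElement;
  const double runtime_ms = 0.08;         // hypothetical measured kernel time
  std::printf("%.1f GB/s\n", metric_amount / (runtime_ms * 1.0e6));
}
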
diff --git a/src/tuning/tuning.hpp b/src/tuning/tuning.hpp
index 1f9b6f4f..bc9c0e03 100644
--- a/src/tuning/tuning.hpp
+++ b/src/tuning/tuning.hpp
@@ -18,6 +18,7 @@
#include <vector>
#include <string>
#include <random>
+#include <utility>
#include <cltune.h>
@@ -26,6 +27,73 @@
namespace clblast {
// =================================================================================================
+// Structures for the tuners with all the default settings
+struct TunerDefaults {
+
+ // The list of arguments relevant for this routine
+ std::vector<std::string> options = {};
+
+ // Default sizes
+ size_t default_m = 1;
+ size_t default_n = 1;
+ size_t default_k = 1;
+
+ // Other defaults
+ size_t default_batch_count = 1;
+ size_t default_num_runs = 10; // run every kernel this many times for averaging
+
+ // Search heuristic defaults
+ double default_fraction = 1.0;
+ size_t default_swarm_size_PSO = 8;
+ double default_influence_global_PSO = 0.1;
+ double default_influence_local_PSO = 0.3;
+ double default_influence_random_PSO = 0.6;
+ size_t default_heuristic = static_cast<size_t>(cltune::SearchMethod::FullSearch);
+ double default_max_temp_ann = 1.0;
+};
+
+// Structures for the tuners with the remaining settings
+struct TunerSettings {
+
+ // The representative kernel and the source code
+ std::string kernel_family;
+ std::string kernel_name;
+ std::string sources;
+
+ // Describes how to obtain the sizes of the buffers
+ size_t size_x = 1;
+ size_t size_y = 1;
+ size_t size_a = 1;
+ size_t size_b = 1;
+ size_t size_c = 1;
+ size_t size_temp = 1;
+
+ // Sets the base thread configuration
+ std::vector<size_t> global_size = {};
+ std::vector<size_t> global_size_ref = {};
+ std::vector<size_t> local_size = {};
+ std::vector<size_t> local_size_ref = {};
+
+ // Transforms the thread configuration based on the parameters
+ using TransformVector = std::vector<std::vector<std::string>>;
+ TransformVector mul_local = {};
+ TransformVector div_local = {};
+ TransformVector mul_global = {};
+ TransformVector div_global = {};
+
+ // Sets the tuning parameters and their possible values
+ std::vector<std::pair<std::string, std::vector<size_t>>> parameters;
+
+ // Describes how to compute the performance metrics
+ size_t metric_amount = 0;
+ std::string performance_unit = "N/A";
+
+ // Returns which search heuristic to use
+ size_t heuristic = static_cast<size_t>(cltune::SearchMethod::FullSearch);
+};
+
+// =================================================================================================
+
// Function to get the command-line arguments, set up the input buffers, configure the tuner, and
// collect the results. Used for all types of kernel families. Note that this is a header-only function so
// that it is automatically compiled for the various kernels (given as the 'C' template argument).
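Note: each kernel tuner now supplies the two structures above through static methods, C::GetTunerDefaults() and C::GetTunerSettings(args), which the Tuner() function below consumes. A minimal sketch of such a provider, assuming a hypothetical TuneExample class (all concrete sizes and parameter values are illustrative):

    template <typename T>
    struct TuneExample {
      static TunerDefaults GetTunerDefaults() {
        auto defaults = TunerDefaults();
        defaults.options = {kArgN, kArgFraction};  // command-line options this tuner accepts
        defaults.default_n = 4096*1024;            // hypothetical default problem size
        return defaults;
      }
      static TunerSettings GetTunerSettings(const Arguments<T> &args) {
        auto settings = TunerSettings();
        settings.kernel_family = "example";
        settings.kernel_name = "ExampleKernel";
        settings.size_x = args.n;                  // buffer sizes derived from the arguments
        settings.global_size = {args.n};           // base thread configuration
        settings.local_size = {64};
        settings.parameters = {{"WGS", {64, 128, 256}}};
        settings.metric_amount = 3 * args.n * GetBytes(args.precision);  // bytes moved
        settings.performance_unit = "GB/s";
        return settings;
      }
    };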
@@ -34,30 +102,31 @@ void Tuner(int argc, char* argv[]) {
constexpr auto kSeed = 42; // fixed seed for reproducibility
// Sets the parameters and platform/device for which to tune (command-line options)
+ const TunerDefaults defaults = C::GetTunerDefaults();
auto command_line_args = RetrieveCommandLineArguments(argc, argv);
auto help = std::string{"* Options given/available:\n"};
auto args = Arguments<T>{};
args.platform_id = GetArgument(command_line_args, help, kArgPlatform, ConvertArgument(std::getenv("CLBLAST_PLATFORM"), size_t{0}));
args.device_id = GetArgument(command_line_args, help, kArgDevice, ConvertArgument(std::getenv("CLBLAST_DEVICE"), size_t{0}));
args.precision = GetArgument(command_line_args, help, kArgPrecision, Precision::kSingle);
- for (auto &o: C::GetOptions()) {
- if (o == kArgM) { args.m = GetArgument(command_line_args, help, kArgM, C::DefaultM()); }
- if (o == kArgN) { args.n = GetArgument(command_line_args, help, kArgN, C::DefaultN()); }
- if (o == kArgK) { args.k = GetArgument(command_line_args, help, kArgK, C::DefaultK()); }
+ for (auto &o: defaults.options) {
+ if (o == kArgM) { args.m = GetArgument(command_line_args, help, kArgM, defaults.default_m); }
+ if (o == kArgN) { args.n = GetArgument(command_line_args, help, kArgN, defaults.default_n); }
+ if (o == kArgK) { args.k = GetArgument(command_line_args, help, kArgK, defaults.default_k); }
if (o == kArgAlpha) { args.alpha = GetArgument(command_line_args, help, kArgAlpha, GetScalar<T>()); }
if (o == kArgBeta) { args.beta = GetArgument(command_line_args, help, kArgBeta, GetScalar<T>()); }
- if (o == kArgFraction) { args.fraction = GetArgument(command_line_args, help, kArgFraction, C::DefaultFraction()); }
- if (o == kArgBatchCount) { args.batch_count = GetArgument(command_line_args, help, kArgBatchCount, C::DefaultBatchCount()); }
- if (o == kArgHeuristicSelection) {args.heuristic_selection = GetArgument(command_line_args, help, kArgHeuristicSelection, C::DefaultHeuristic()); }
- if (o == kArgPsoSwarmSize) {args.pso_swarm_size = GetArgument(command_line_args, help, kArgPsoSwarmSize , C::DefaultSwarmSizePSO()); }
- if (o == kArgPsoInfGlobal) {args.pso_inf_global = GetArgument(command_line_args, help, kArgPsoInfGlobal, C::DefaultInfluenceGlobalPSO()); }
- if (o == kArgPsoInfLocal) {args.pso_inf_local = GetArgument(command_line_args, help, kArgPsoInfLocal, C::DefaultInfluenceLocalPSO()); }
- if (o == kArgPsoInfRandom) {args.pso_inf_random = GetArgument(command_line_args, help, kArgPsoInfRandom, C::DefaultInfluenceRandomPSO()); }
- if (o == kArgAnnMaxTemp) {args.ann_max_temperature = GetArgument(command_line_args, help, kArgAnnMaxTemp, C::DefaultMaxTempAnn());}
+ if (o == kArgFraction) { args.fraction = GetArgument(command_line_args, help, kArgFraction, defaults.default_fraction); }
+ if (o == kArgBatchCount) { args.batch_count = GetArgument(command_line_args, help, kArgBatchCount, defaults.default_batch_count); }
+ if (o == kArgHeuristicSelection) {args.heuristic_selection = GetArgument(command_line_args, help, kArgHeuristicSelection, defaults.default_heuristic); }
+ if (o == kArgPsoSwarmSize) {args.pso_swarm_size = GetArgument(command_line_args, help, kArgPsoSwarmSize , defaults.default_swarm_size_PSO); }
+ if (o == kArgPsoInfGlobal) {args.pso_inf_global = GetArgument(command_line_args, help, kArgPsoInfGlobal, defaults.default_influence_global_PSO); }
+ if (o == kArgPsoInfLocal) {args.pso_inf_local = GetArgument(command_line_args, help, kArgPsoInfLocal, defaults.default_influence_local_PSO); }
+ if (o == kArgPsoInfRandom) {args.pso_inf_random = GetArgument(command_line_args, help, kArgPsoInfRandom, defaults.default_influence_random_PSO); }
+ if (o == kArgAnnMaxTemp) {args.ann_max_temperature = GetArgument(command_line_args, help, kArgAnnMaxTemp, defaults.default_max_temp_ann); }
}
- const auto num_runs = GetArgument(command_line_args, help, kArgNumRuns, C::DefaultNumRuns());
-
+ const auto num_runs = GetArgument(command_line_args, help, kArgNumRuns, defaults.default_num_runs);
fprintf(stdout, "%s\n", help.c_str());
+ const TunerSettings settings = C::GetTunerSettings(args);
// Tests validity of the given arguments
C::TestValidArguments(args);
@@ -87,12 +156,12 @@ void Tuner(int argc, char* argv[]) {
}
// Creates input buffers with random data
- auto x_vec = std::vector<T>(C::GetSizeX(args));
- auto y_vec = std::vector<T>(C::GetSizeY(args));
- auto a_mat = std::vector<T>(C::GetSizeA(args));
- auto b_mat = std::vector<T>(C::GetSizeB(args));
- auto c_mat = std::vector<T>(C::GetSizeC(args));
- auto temp = std::vector<T>(C::GetSizeTemp(args));
+ auto x_vec = std::vector<T>(settings.size_x);
+ auto y_vec = std::vector<T>(settings.size_y);
+ auto a_mat = std::vector<T>(settings.size_a);
+ auto b_mat = std::vector<T>(settings.size_b);
+ auto c_mat = std::vector<T>(settings.size_c);
+ auto temp = std::vector<T>(settings.size_temp);
std::mt19937 mt(kSeed);
std::uniform_real_distribution<double> dist(kTestDataLowerLimit, kTestDataUpperLimit);
PopulateVector(x_vec, mt, dist);
@@ -105,15 +174,13 @@ void Tuner(int argc, char* argv[]) {
// Initializes the tuner for the chosen device
cltune::Tuner tuner(args.platform_id, args.device_id);
- // Select the search method based on the cmd_line arguments
- // If the tuner does not support the selected choice, Full Search will be returned.
- auto method = C::GetHeuristic(args);
-
+ // Selects the search method based on the command-line arguments.
+ // If the tuner does not support the selected method, it falls back to full search.
+ auto method = settings.heuristic;
if (method == 1) { tuner.UseRandomSearch(1.0/args.fraction); }
else if (method == 2) { tuner.UseAnnealing(1.0/args.fraction, args.ann_max_temperature); }
- else if (method == 3) {
- tuner.UsePSO(1.0/args.fraction, args.pso_swarm_size, args.pso_inf_global, args.pso_inf_local, args.pso_inf_random);
- }
+ else if (method == 3) { tuner.UsePSO(1.0/args.fraction, args.pso_swarm_size, args.pso_inf_global,
+ args.pso_inf_local, args.pso_inf_random); }
else { tuner.UseFullSearch(); }
// Set extra settings for specific defines. This mimics src/routine.cc.
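Note: the numeric codes above map onto CLTune's search methods (1: random search, 2: simulated annealing, 3: PSO, anything else: full search). A tuner opts into a non-default heuristic by setting the corresponding field; a sketch, assuming cltune::SearchMethod enumerates PSO as in CLTune:

    // Inside a tuner's GetTunerSettings() implementation:
    settings.heuristic = static_cast<size_t>(cltune::SearchMethod::PSO);

For all heuristics other than full search, the explored portion of the search space is 1.0/args.fraction, taken from the kArgFraction command-line option.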
@@ -127,12 +194,14 @@ void Tuner(int argc, char* argv[]) {
}
// Loads the kernel sources and defines the kernel to tune
- auto sources = defines + C::GetSources();
- auto id = tuner.AddKernelFromString(sources, C::KernelName(), C::GlobalSize(args), C::LocalSize());
- tuner.SetReferenceFromString(sources, C::KernelName(), C::GlobalSizeRef(args), C::LocalSizeRef());
+ auto sources = defines + settings.sources;
+ auto id = tuner.AddKernelFromString(sources, settings.kernel_name, settings.global_size, settings.local_size);
+ tuner.SetReferenceFromString(sources, settings.kernel_name, settings.global_size_ref, settings.local_size_ref);
// Sets the tunable parameters and their possible values
- C::SetParameters(tuner, id);
+ for (const auto &parameter: settings.parameters) {
+ tuner.AddParameter(id, parameter.first, parameter.second);
+ }
C::SetConstraints(tuner, id);
C::SetLocalMemorySize(tuner, id, args);
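Note: each entry of settings.parameters is a name plus the list of values to explore, which the loop above forwards one by one to tuner.AddParameter(). An illustrative parameter set (values hypothetical):

    settings.parameters = {
      {"WGS1", {4, 8, 16, 32}},  // work-group size, first dimension
      {"WGS2", {1, 2, 4, 8}},    // work-group size, second dimension
      {"WPT",  {1, 2, 4}},       // work per thread
    };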
@@ -141,10 +210,10 @@ void Tuner(int argc, char* argv[]) {
tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
// Modifies the thread-sizes (both global and local) based on the parameters
- for (auto &parameters: C::MulLocal()) { tuner.MulLocalSize(id, parameters); }
- for (auto &parameters: C::DivLocal()) { tuner.DivLocalSize(id, parameters); }
- for (auto &parameters: C::MulGlobal()) { tuner.MulGlobalSize(id, parameters); }
- for (auto &parameters: C::DivGlobal()) { tuner.DivGlobalSize(id, parameters); }
+ for (auto &parameters: settings.mul_local) { tuner.MulLocalSize(id, parameters); }
+ for (auto &parameters: settings.div_local) { tuner.DivLocalSize(id, parameters); }
+ for (auto &parameters: settings.mul_global) { tuner.MulGlobalSize(id, parameters); }
+ for (auto &parameters: settings.div_global) { tuner.DivGlobalSize(id, parameters); }
// Sets the function's arguments
C::SetArguments(tuner, args, x_vec, y_vec, a_mat, b_mat, c_mat, temp);
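Note: these loops replace the per-tuner static methods deleted at the top of this diff. For the Xger tuner, for example, the removed MulLocal()/DivGlobal() overrides become plain data:

    settings.mul_local  = {{"WGS1", "WGS2"}};  // local size is scaled by WGS1 x WGS2
    settings.div_global = {{"WPT", "WPT"}};    // each thread then covers WPT x WPT elements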
@@ -160,20 +229,20 @@ void Tuner(int argc, char* argv[]) {
// Also prints the performance of the best-case in terms of GB/s or GFLOPS
if (time_ms != 0.0) {
printf("[ -------> ] %.2lf ms", time_ms);
- printf(" or %.1lf %s\n", C::GetMetric(args)/(time_ms*1.0e6), C::PerformanceUnit().c_str());
+ printf(" or %.1lf %s\n", settings.metric_amount/(time_ms*1.0e6), settings.performance_unit.c_str());
}
// Outputs the results as JSON to disk, including some meta-data
auto precision_string = std::to_string(static_cast<size_t>(args.precision));
auto metadata = std::vector<std::pair<std::string,std::string>>{
- {"kernel_family", C::KernelFamily()},
+ {"kernel_family", settings.kernel_family},
{"precision", precision_string},
{"clblast_device_type", device_type},
{"clblast_device_vendor", device_vendor},
{"clblast_device_architecture", device_architecture},
{"clblast_device_name", device_name}
};
- for (auto &o: C::GetOptions()) {
+ for (auto &o: defaults.options) {
if (o == kArgM) { metadata.push_back({"arg_m", std::to_string(args.m)}); }
if (o == kArgN) { metadata.push_back({"arg_n", std::to_string(args.n)}); }
if (o == kArgK) { metadata.push_back({"arg_k", std::to_string(args.k)}); }
@@ -181,7 +250,7 @@ void Tuner(int argc, char* argv[]) {
if (o == kArgBeta) { metadata.push_back({"arg_beta", ToString(args.beta)}); }
if (o == kArgBatchCount) { metadata.push_back({"arg_batch_count", ToString(args.batch_count)}); }
}
- tuner.PrintJSON("clblast_"+C::KernelFamily()+"_"+precision_string+".json", metadata);
+ tuner.PrintJSON("clblast_" + settings.kernel_family + "_" + precision_string + ".json", metadata);
}
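Note: the metric_amount/performance_unit pair replaces the per-tuner GetMetric() and PerformanceUnit() methods, and the division by (time_ms * 1.0e6) in the printing code above converts bytes per millisecond into GB/s. Taking the Xger tuner's removed GetMetric() as an example:

    // In GetTunerSettings(), mirroring the removed Xger code:
    settings.metric_amount = (2*args.m*args.n + args.m + args.n) * GetBytes(args.precision);
    settings.performance_unit = "GB/s";
    // e.g. m = n = 1024 in single precision (4 bytes per element):
    //   (2*1024*1024 + 2048) * 4 = 8,396,800 bytes
    // so a best case of 1.00 ms prints as 8,396,800 / (1.00 * 1.0e6) ~= 8.4 GB/s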
diff --git a/src/utilities/buffer_test.hpp b/src/utilities/buffer_test.hpp
index b5693181..fd071434 100644
--- a/src/utilities/buffer_test.hpp
+++ b/src/utilities/buffer_test.hpp
@@ -15,7 +15,7 @@
#ifndef CLBLAST_BUFFER_TEST_H_
#define CLBLAST_BUFFER_TEST_H_
-#include "clblast.h"
+#include "utilities/utilities.hpp"
namespace clblast {
// =================================================================================================
diff --git a/src/utilities/clblast_exceptions.cpp b/src/utilities/clblast_exceptions.cpp
index 96f10860..32526215 100644
--- a/src/utilities/clblast_exceptions.cpp
+++ b/src/utilities/clblast_exceptions.cpp
@@ -55,7 +55,7 @@ StatusCode DispatchException()
} catch (BLASError &e) {
// no message is printed for invalid argument errors
status = e.status();
- } catch (CLError &e) {
+ } catch (CLCudaAPIError &e) {
message = e.what();
status = static_cast<StatusCode>(e.status());
} catch (RuntimeErrorCode &e) {
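Note: with the rename, the same catch clause covers the error type thrown by both the OpenCL and the CUDA wrappers. DispatchException() is meant to be invoked from a catch block, rethrowing and classifying the active exception; a sketch of the usual call-site pattern (the routine name is illustrative):

    StatusCode SomeRoutine(/* ... */) {
      try {
        // ... launch kernels, which may throw BLASError, CLCudaAPIError, etc. ...
        return StatusCode::kSuccess;
      } catch (...) {
        return DispatchException();  // maps the active exception onto a StatusCode
      }
    }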
diff --git a/src/utilities/clblast_exceptions.hpp b/src/utilities/clblast_exceptions.hpp
index 0d0033b6..a790be9c 100644
--- a/src/utilities/clblast_exceptions.hpp
+++ b/src/utilities/clblast_exceptions.hpp
@@ -16,8 +16,7 @@
#ifndef CLBLAST_EXCEPTIONS_H_
#define CLBLAST_EXCEPTIONS_H_
-#include "clpp11.hpp"
-#include "clblast.h"
+#include "utilities/utilities.hpp"
namespace clblast {
// =================================================================================================
diff --git a/src/utilities/utilities.cpp b/src/utilities/utilities.cpp
index 4b8d5a09..f2574104 100644
--- a/src/utilities/utilities.cpp
+++ b/src/utilities/utilities.cpp
@@ -391,16 +391,9 @@ template <> Precision PrecisionValue<double2>() { return Precision::kComplexDoub
// Returns false if this precision is not supported by the device
template <> bool PrecisionSupported<float>(const Device &) { return true; }
template <> bool PrecisionSupported<float2>(const Device &) { return true; }
-template <> bool PrecisionSupported<double>(const Device &device) {
- return device.HasExtension(kKhronosDoublePrecision);
-}
-template <> bool PrecisionSupported<double2>(const Device &device) {
- return device.HasExtension(kKhronosDoublePrecision);
-}
-template <> bool PrecisionSupported<half>(const Device &device) {
- if (device.Name() == "Mali-T628") { return true; } // supports fp16 but not cl_khr_fp16 officially
- return device.HasExtension(kKhronosHalfPrecision);
-}
+template <> bool PrecisionSupported<double>(const Device &device) { return device.SupportsFP64(); }
+template <> bool PrecisionSupported<double2>(const Device &device) { return device.SupportsFP64(); }
+template <> bool PrecisionSupported<half>(const Device &device) { return device.SupportsFP16(); }
// =================================================================================================
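Note: the extension checks, including the Mali-T628 special case for fp16, move behind Device helpers so the CUDA back-end can answer them differently. A sketch of the OpenCL-side helpers implied by the removed code (the actual implementations live in clpp11.hpp and cupp11.hpp):

    // Inside the OpenCL Device wrapper (sketch):
    bool SupportsFP64() const { return HasExtension("cl_khr_fp64"); }
    bool SupportsFP16() const {
      if (Name() == "Mali-T628") { return true; }  // supports fp16 but not cl_khr_fp16 officially
      return HasExtension("cl_khr_fp16");
    }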
@@ -420,13 +413,17 @@ std::string GetDeviceVendor(const Device& device) {
// Mid-level info
std::string GetDeviceArchitecture(const Device& device) {
auto device_architecture = std::string{""};
- if (device.HasExtension(kKhronosAttributesNVIDIA)) {
+ #ifdef CUDA_API
device_architecture = device.NVIDIAComputeCapability();
- }
- else if (device.HasExtension(kKhronosAttributesAMD)) {
- device_architecture = device.Name(); // Name is architecture for AMD APP and AMD ROCm
- }
- // Note: no else - 'device_architecture' might be the empty string
+ #else
+ if (device.HasExtension(kKhronosAttributesNVIDIA)) {
+ device_architecture = device.NVIDIAComputeCapability();
+ }
+ else if (device.HasExtension(kKhronosAttributesAMD)) {
+ device_architecture = device.Name(); // Name is architecture for AMD APP and AMD ROCm
+ }
+ // Note: no else - 'device_architecture' might be the empty string
+ #endif
for (auto &find_and_replace : device_mapping::kArchitectureNames) { // replacing to common names
if (device_architecture == find_and_replace.first) { device_architecture = find_and_replace.second; }
diff --git a/src/utilities/utilities.hpp b/src/utilities/utilities.hpp
index e45c606c..f56226be 100644
--- a/src/utilities/utilities.hpp
+++ b/src/utilities/utilities.hpp
@@ -21,8 +21,13 @@
#include <complex>
#include <random>
-#include "clpp11.hpp"
-#include "clblast.h"
+#ifdef OPENCL_API
+ #include "clpp11.hpp"
+ #include "clblast.h"
+#elif CUDA_API
+ #include "cupp11.hpp"
+ #include "clblast_cuda.h"
+#endif
#include "clblast_half.h"
#include "utilities/clblast_exceptions.hpp"
#include "utilities/msvc.hpp"
@@ -31,15 +36,13 @@ namespace clblast {
// =================================================================================================
// Shorthands for half-precision
-using half = cl_half; // based on the OpenCL type, which is actually an 'unsigned short'
+using half = unsigned short; // the 'cl_half' OpenCL type is actually an 'unsigned short'
// Shorthands for complex data-types
using float2 = std::complex<float>;
using double2 = std::complex<double>;
// Khronos OpenCL extensions
-const std::string kKhronosHalfPrecision = "cl_khr_fp16";
-const std::string kKhronosDoublePrecision = "cl_khr_fp64";
const std::string kKhronosAttributesAMD = "cl_amd_device_attribute_query";
const std::string kKhronosAttributesNVIDIA = "cl_nv_device_attribute_query";