From df3c9f4a8ab9e82ccc4add15b04da5c1b6172b72 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 8 Oct 2017 21:52:02 +0200 Subject: Moved non-routine-specific API functions and includes to separate files --- CMakeLists.txt | 2 + scripts/generator/generator.py | 4 +- src/api_common.cpp | 169 +++++++++++++++++++++++++++++++++ src/clblast.cpp | 207 +---------------------------------------- src/routines/routines.hpp | 76 +++++++++++++++ 5 files changed, 250 insertions(+), 208 deletions(-) create mode 100644 src/api_common.cpp create mode 100644 src/routines/routines.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 05e7393b..52accbd4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -182,6 +182,7 @@ set(SOURCES src/routines/common.cpp src/utilities/clblast_exceptions.cpp src/utilities/utilities.cpp + src/api_common.cpp src/cache.cpp src/clblast.cpp src/clblast_c.cpp @@ -201,6 +202,7 @@ set(HEADERS # such that they can be discovered by IDEs such as CLion and Visual src/routines/level1/xmin.hpp src/routines/level1/xsum.hpp src/routines/common.hpp + src/routines/routines.hpp src/utilities/buffer_test.hpp src/utilities/clblast_exceptions.hpp src/utilities/device_mapping.hpp diff --git a/scripts/generator/generator.py b/scripts/generator/generator.py index df0eaca0..0d34d7fe 100755 --- a/scripts/generator/generator.py +++ b/scripts/generator/generator.py @@ -42,8 +42,8 @@ FILES = [ "/include/clblast_netlib_c.h", "/src/clblast_netlib_c.cpp", ] -HEADER_LINES = [122, 79, 126, 24, 29, 41, 29, 65, 32] -FOOTER_LINES = [25, 147, 27, 38, 6, 6, 6, 9, 2] +HEADER_LINES = [122, 21, 126, 24, 29, 41, 29, 65, 32] +FOOTER_LINES = [25, 3, 27, 38, 6, 6, 6, 9, 2] HEADER_LINES_DOC = 0 FOOTER_LINES_DOC = 63 diff --git a/src/api_common.cpp b/src/api_common.cpp new file mode 100644 index 00000000..aa7e2b0f --- /dev/null +++ b/src/api_common.cpp @@ -0,0 +1,169 @@ +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file implements the common (non-OpenCL-specific) functions of the CLBlast API. +// +// ================================================================================================= + +#include + +#include "cache.hpp" +#include "routines/routines.hpp" +#include "clblast.h" + +namespace clblast { +// ================================================================================================= + +// Clears the cache of stored binaries +StatusCode ClearCache() { + try { + ProgramCache::Instance().Invalidate(); + BinaryCache::Instance().Invalidate(); + } catch (...) 
{ return DispatchException(); } + return StatusCode::kSuccess; +} + +template +void FillCacheForPrecision(Queue &queue) { + try { + + // Runs all the level 1 set-up functions + Xswap(queue, nullptr); Xswap(queue, nullptr); + Xswap(queue, nullptr); Xswap(queue, nullptr); + Xscal(queue, nullptr); Xscal(queue, nullptr); + Xcopy(queue, nullptr); Xcopy(queue, nullptr); + Xaxpy(queue, nullptr); Xaxpy(queue, nullptr); + Xdot(queue, nullptr); + Xdotu(queue, nullptr); + Xdotc(queue, nullptr); + Xnrm2(queue, nullptr); Xnrm2(queue, nullptr); + Xasum(queue, nullptr); Xasum(queue, nullptr); + Xsum(queue, nullptr); Xsum(queue, nullptr); + Xamax(queue, nullptr); Xamax(queue, nullptr); + Xmax(queue, nullptr); Xmax(queue, nullptr); + Xmin(queue, nullptr); Xmin(queue, nullptr); + + // Runs all the level 2 set-up functions + Xgemv(queue, nullptr); Xgemv(queue, nullptr); + Xgbmv(queue, nullptr); Xgbmv(queue, nullptr); + Xhemv(queue, nullptr); + Xhbmv(queue, nullptr); + Xhpmv(queue, nullptr); + Xsymv(queue, nullptr); + Xsbmv(queue, nullptr); + Xspmv(queue, nullptr); + Xtrmv(queue, nullptr); Xtrmv(queue, nullptr); + Xtbmv(queue, nullptr); Xtbmv(queue, nullptr); + Xtpmv(queue, nullptr); Xtpmv(queue, nullptr); + Xger(queue, nullptr); + Xgeru(queue, nullptr); + Xgerc(queue, nullptr); + Xher(queue, nullptr); + Xhpr(queue, nullptr); + Xher2(queue, nullptr); + Xhpr2(queue, nullptr); + Xsyr(queue, nullptr); + Xspr(queue, nullptr); + Xsyr2(queue, nullptr); + Xspr2(queue, nullptr); + + // Runs all the level 3 set-up functions + Xgemm(queue, nullptr); Xgemm(queue, nullptr); + Xsymm(queue, nullptr); Xsymm(queue, nullptr); + Xhemm(queue, nullptr); + Xsyrk(queue, nullptr); Xsyrk(queue, nullptr); + Xherk(queue, nullptr); + Xsyr2k(queue, nullptr); Xsyr2k(queue, nullptr); + Xher2k(queue, nullptr); + Xtrmm(queue, nullptr); Xtrmm(queue, nullptr); + + // Runs all the non-BLAS set-up functions + Xomatcopy(queue, nullptr); Xomatcopy(queue, nullptr); + + } catch(const RuntimeErrorCode &e) { + if (e.status() != StatusCode::kNoDoublePrecision && + e.status() != StatusCode::kNoHalfPrecision) { + throw; + } + } +} + +// Fills the cache with all binaries for a specific device +// TODO: Add half-precision FP16 set-up calls +StatusCode FillCache(const RawDeviceID device) { + try { + + // Creates a sample context and queue to match the normal routine calling conventions + auto device_cpp = Device(device); + auto context = Context(device_cpp); + auto queue = Queue(context, device_cpp); + + FillCacheForPrecision(queue); + FillCacheForPrecision(queue); + + } catch (...) 
{ return DispatchException(); }
+  return StatusCode::kSuccess;
+}
+
+// =================================================================================================
+
+// Overrides the tuning parameters for this device-precision-kernel combination
+StatusCode OverrideParameters(const RawDeviceID device, const std::string &kernel_name,
+                              const Precision precision,
+                              const std::unordered_map<std::string,size_t> &parameters) {
+  try {
+
+    // Retrieves the device name
+    const auto device_cpp = Device(device);
+    const auto platform_id = device_cpp.PlatformID();
+    const auto device_name = GetDeviceName(device_cpp);
+
+    // Retrieves the current database values to verify whether the new ones are complete
+    auto in_cache = false;
+    auto current_database = DatabaseCache::Instance().Get(DatabaseKeyRef{platform_id, device, precision, kernel_name}, &in_cache);
+    if (!in_cache) {
+      log_debug("Searching database for kernel '" + kernel_name + "'");
+      current_database = Database(device_cpp, kernel_name, precision, {});
+    }
+
+    // Verifies the parameters size
+    const auto current_parameter_names = current_database.GetParameterNames();
+    if (current_parameter_names.size() != parameters.size()) {
+      return StatusCode::kMissingOverrideParameter;
+    }
+
+    // Retrieves the names and values separately and in the same order as the existing database
+    auto parameter_values = database::Params{0};
+    auto i = size_t{0};
+    for (const auto &current_param : current_parameter_names) {
+      if (parameters.find(current_param) == parameters.end()) {
+        return StatusCode::kMissingOverrideParameter;
+      }
+      const auto parameter_value = parameters.at(current_param);
+      parameter_values[i] = parameter_value;
+      ++i;
+    }
+
+    // Creates a small custom database based on the provided parameters
+    const auto database_device = database::DatabaseDevice{database::kDeviceNameDefault, parameter_values};
+    const auto database_architecture = database::DatabaseArchitecture{"default", {database_device}};
+    const auto database_vendor = database::DatabaseVendor{database::kDeviceTypeAll, "default", {database_architecture}};
+    const auto database_entry = database::DatabaseEntry{kernel_name, precision, current_parameter_names, {database_vendor}};
+    const auto database_entries = std::vector<database::DatabaseEntry>{database_entry};
+    const auto database = Database(device_cpp, kernel_name, precision, database_entries);
+
+    // Removes the old database entry and stores the new one in the cache
+    DatabaseCache::Instance().Remove(DatabaseKey{platform_id, device, precision, kernel_name});
+    DatabaseCache::Instance().Store(DatabaseKey{platform_id, device, precision, kernel_name}, Database(database));
+
+  } catch (...) 
{ return DispatchException(); } + return StatusCode::kSuccess; +} + +// ================================================================================================= +} // namespace clblast diff --git a/src/clblast.cpp b/src/clblast.cpp index 9f865a23..7d2c2cef 100644 --- a/src/clblast.cpp +++ b/src/clblast.cpp @@ -15,67 +15,9 @@ #include -#include "cache.hpp" +#include "routines/routines.hpp" #include "clblast.h" -// BLAS level-1 includes -#include "routines/level1/xswap.hpp" -#include "routines/level1/xscal.hpp" -#include "routines/level1/xcopy.hpp" -#include "routines/level1/xaxpy.hpp" -#include "routines/level1/xdot.hpp" -#include "routines/level1/xdotu.hpp" -#include "routines/level1/xdotc.hpp" -#include "routines/level1/xnrm2.hpp" -#include "routines/level1/xasum.hpp" -#include "routines/level1/xsum.hpp" // non-BLAS routine -#include "routines/level1/xamax.hpp" -#include "routines/level1/xamin.hpp" // non-BLAS routine -#include "routines/level1/xmax.hpp" // non-BLAS routine -#include "routines/level1/xmin.hpp" // non-BLAS routine - -// BLAS level-2 includes -#include "routines/level2/xgemv.hpp" -#include "routines/level2/xgbmv.hpp" -#include "routines/level2/xhemv.hpp" -#include "routines/level2/xhbmv.hpp" -#include "routines/level2/xhpmv.hpp" -#include "routines/level2/xsymv.hpp" -#include "routines/level2/xsbmv.hpp" -#include "routines/level2/xspmv.hpp" -#include "routines/level2/xtrmv.hpp" -#include "routines/level2/xtbmv.hpp" -#include "routines/level2/xtpmv.hpp" -#include "routines/level2/xtrsv.hpp" -#include "routines/level2/xger.hpp" -#include "routines/level2/xgeru.hpp" -#include "routines/level2/xgerc.hpp" -#include "routines/level2/xher.hpp" -#include "routines/level2/xhpr.hpp" -#include "routines/level2/xher2.hpp" -#include "routines/level2/xhpr2.hpp" -#include "routines/level2/xsyr.hpp" -#include "routines/level2/xspr.hpp" -#include "routines/level2/xsyr2.hpp" -#include "routines/level2/xspr2.hpp" - -// BLAS level-3 includes -#include "routines/level3/xgemm.hpp" -#include "routines/level3/xsymm.hpp" -#include "routines/level3/xhemm.hpp" -#include "routines/level3/xsyrk.hpp" -#include "routines/level3/xherk.hpp" -#include "routines/level3/xsyr2k.hpp" -#include "routines/level3/xher2k.hpp" -#include "routines/level3/xtrmm.hpp" -#include "routines/level3/xtrsm.hpp" - -// Level-x includes (non-BLAS) -#include "routines/levelx/xomatcopy.hpp" -#include "routines/levelx/xim2col.hpp" -#include "routines/levelx/xaxpybatched.hpp" -#include "routines/levelx/xgemmbatched.hpp" - namespace clblast { // ================================================================================================= @@ -2389,153 +2331,6 @@ template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, cl_mem, const size_t*, const size_t, const size_t, cl_command_queue*, cl_event*); -// ================================================================================================= - -// Clears the cache of stored binaries -StatusCode ClearCache() { - try { - ProgramCache::Instance().Invalidate(); - BinaryCache::Instance().Invalidate(); - } catch (...) 
{ return DispatchException(); } - return StatusCode::kSuccess; -} - -template -void FillCacheForPrecision(Queue &queue) { - try { - - // Runs all the level 1 set-up functions - Xswap(queue, nullptr); Xswap(queue, nullptr); - Xswap(queue, nullptr); Xswap(queue, nullptr); - Xscal(queue, nullptr); Xscal(queue, nullptr); - Xcopy(queue, nullptr); Xcopy(queue, nullptr); - Xaxpy(queue, nullptr); Xaxpy(queue, nullptr); - Xdot(queue, nullptr); - Xdotu(queue, nullptr); - Xdotc(queue, nullptr); - Xnrm2(queue, nullptr); Xnrm2(queue, nullptr); - Xasum(queue, nullptr); Xasum(queue, nullptr); - Xsum(queue, nullptr); Xsum(queue, nullptr); - Xamax(queue, nullptr); Xamax(queue, nullptr); - Xmax(queue, nullptr); Xmax(queue, nullptr); - Xmin(queue, nullptr); Xmin(queue, nullptr); - - // Runs all the level 2 set-up functions - Xgemv(queue, nullptr); Xgemv(queue, nullptr); - Xgbmv(queue, nullptr); Xgbmv(queue, nullptr); - Xhemv(queue, nullptr); - Xhbmv(queue, nullptr); - Xhpmv(queue, nullptr); - Xsymv(queue, nullptr); - Xsbmv(queue, nullptr); - Xspmv(queue, nullptr); - Xtrmv(queue, nullptr); Xtrmv(queue, nullptr); - Xtbmv(queue, nullptr); Xtbmv(queue, nullptr); - Xtpmv(queue, nullptr); Xtpmv(queue, nullptr); - Xger(queue, nullptr); - Xgeru(queue, nullptr); - Xgerc(queue, nullptr); - Xher(queue, nullptr); - Xhpr(queue, nullptr); - Xher2(queue, nullptr); - Xhpr2(queue, nullptr); - Xsyr(queue, nullptr); - Xspr(queue, nullptr); - Xsyr2(queue, nullptr); - Xspr2(queue, nullptr); - - // Runs all the level 3 set-up functions - Xgemm(queue, nullptr); Xgemm(queue, nullptr); - Xsymm(queue, nullptr); Xsymm(queue, nullptr); - Xhemm(queue, nullptr); - Xsyrk(queue, nullptr); Xsyrk(queue, nullptr); - Xherk(queue, nullptr); - Xsyr2k(queue, nullptr); Xsyr2k(queue, nullptr); - Xher2k(queue, nullptr); - Xtrmm(queue, nullptr); Xtrmm(queue, nullptr); - - // Runs all the non-BLAS set-up functions - Xomatcopy(queue, nullptr); Xomatcopy(queue, nullptr); - - } catch(const RuntimeErrorCode &e) { - if (e.status() != StatusCode::kNoDoublePrecision && - e.status() != StatusCode::kNoHalfPrecision) { - throw; - } - } -} - -// Fills the cache with all binaries for a specific device -// TODO: Add half-precision FP16 set-up calls -StatusCode FillCache(const cl_device_id device) { - try { - - // Creates a sample context and queue to match the normal routine calling conventions - auto device_cpp = Device(device); - auto context = Context(device_cpp); - auto queue = Queue(context, device_cpp); - - FillCacheForPrecision(queue); - FillCacheForPrecision(queue); - - } catch (...) 
{ return DispatchException(); }
-  return StatusCode::kSuccess;
-}
-
-// =================================================================================================
-
-// Overrides the tuning parameters for this device-precision-kernel combination
-StatusCode OverrideParameters(const cl_device_id device, const std::string &kernel_name,
-                              const Precision precision,
-                              const std::unordered_map<std::string,size_t> &parameters) {
-  try {
-
-    // Retrieves the device name
-    const auto device_cpp = Device(device);
-    const auto platform_id = device_cpp.PlatformID();
-    const auto device_name = GetDeviceName(device_cpp);
-
-    // Retrieves the current database values to verify whether the new ones are complete
-    auto in_cache = false;
-    auto current_database = DatabaseCache::Instance().Get(DatabaseKeyRef{platform_id, device, precision, kernel_name}, &in_cache);
-    if (!in_cache) {
-      log_debug("Searching database for kernel '" + kernel_name + "'");
-      current_database = Database(device_cpp, kernel_name, precision, {});
-    }
-
-    // Verifies the parameters size
-    const auto current_parameter_names = current_database.GetParameterNames();
-    if (current_parameter_names.size() != parameters.size()) {
-      return StatusCode::kMissingOverrideParameter;
-    }
-
-    // Retrieves the names and values separately and in the same order as the existing database
-    auto parameter_values = database::Params{0};
-    auto i = size_t{0};
-    for (const auto &current_param : current_parameter_names) {
-      if (parameters.find(current_param) == parameters.end()) {
-        return StatusCode::kMissingOverrideParameter;
-      }
-      const auto parameter_value = parameters.at(current_param);
-      parameter_values[i] = parameter_value;
-      ++i;
-    }
-
-    // Creates a small custom database based on the provided parameters
-    const auto database_device = database::DatabaseDevice{database::kDeviceNameDefault, parameter_values};
-    const auto database_architecture = database::DatabaseArchitecture{"default", {database_device}};
-    const auto database_vendor = database::DatabaseVendor{database::kDeviceTypeAll, "default", {database_architecture}};
-    const auto database_entry = database::DatabaseEntry{kernel_name, precision, current_parameter_names, {database_vendor}};
-    const auto database_entries = std::vector<database::DatabaseEntry>{database_entry};
-    const auto database = Database(device_cpp, kernel_name, precision, database_entries);
-
-    // Removes the old database entry and stores the new one in the cache
-    DatabaseCache::Instance().Remove(DatabaseKey{platform_id, device, precision, kernel_name});
-    DatabaseCache::Instance().Store(DatabaseKey{platform_id, device, precision, kernel_name}, Database(database));
-
-  } catch (...) { return DispatchException(); }
-  return StatusCode::kSuccess;
-}
 // =================================================================================================
 } // namespace clblast
diff --git a/src/routines/routines.hpp b/src/routines/routines.hpp
new file mode 100644
index 00000000..9e7768b9
--- /dev/null
+++ b/src/routines/routines.hpp
@@ -0,0 +1,76 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+//   Cedric Nugteren
+//
+// This file contains all the includes of all the routines in CLBlast.
+// +// ================================================================================================= + +#ifndef CLBLAST_ROUTINES_ROUTINES_H_ +#define CLBLAST_ROUTINES_ROUTINES_H_ + +// BLAS level-1 includes +#include "routines/level1/xswap.hpp" +#include "routines/level1/xscal.hpp" +#include "routines/level1/xcopy.hpp" +#include "routines/level1/xaxpy.hpp" +#include "routines/level1/xdot.hpp" +#include "routines/level1/xdotu.hpp" +#include "routines/level1/xdotc.hpp" +#include "routines/level1/xnrm2.hpp" +#include "routines/level1/xasum.hpp" +#include "routines/level1/xsum.hpp" // non-BLAS routine +#include "routines/level1/xamax.hpp" +#include "routines/level1/xamin.hpp" // non-BLAS routine +#include "routines/level1/xmax.hpp" // non-BLAS routine +#include "routines/level1/xmin.hpp" // non-BLAS routine + +// BLAS level-2 includes +#include "routines/level2/xgemv.hpp" +#include "routines/level2/xgbmv.hpp" +#include "routines/level2/xhemv.hpp" +#include "routines/level2/xhbmv.hpp" +#include "routines/level2/xhpmv.hpp" +#include "routines/level2/xsymv.hpp" +#include "routines/level2/xsbmv.hpp" +#include "routines/level2/xspmv.hpp" +#include "routines/level2/xtrmv.hpp" +#include "routines/level2/xtbmv.hpp" +#include "routines/level2/xtpmv.hpp" +#include "routines/level2/xtrsv.hpp" +#include "routines/level2/xger.hpp" +#include "routines/level2/xgeru.hpp" +#include "routines/level2/xgerc.hpp" +#include "routines/level2/xher.hpp" +#include "routines/level2/xhpr.hpp" +#include "routines/level2/xher2.hpp" +#include "routines/level2/xhpr2.hpp" +#include "routines/level2/xsyr.hpp" +#include "routines/level2/xspr.hpp" +#include "routines/level2/xsyr2.hpp" +#include "routines/level2/xspr2.hpp" + +// BLAS level-3 includes +#include "routines/level3/xgemm.hpp" +#include "routines/level3/xsymm.hpp" +#include "routines/level3/xhemm.hpp" +#include "routines/level3/xsyrk.hpp" +#include "routines/level3/xherk.hpp" +#include "routines/level3/xsyr2k.hpp" +#include "routines/level3/xher2k.hpp" +#include "routines/level3/xtrmm.hpp" +#include "routines/level3/xtrsm.hpp" + +// Level-x includes (non-BLAS) +#include "routines/levelx/xomatcopy.hpp" +#include "routines/levelx/xim2col.hpp" +#include "routines/levelx/xaxpybatched.hpp" +#include "routines/levelx/xgemmbatched.hpp" + +// CLBLAST_ROUTINES_ROUTINES_H_ +#endif -- cgit v1.2.3 From e8f1de0265900db42b8dd097245a765ce7f699b5 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Mon, 9 Oct 2017 18:30:19 +0200 Subject: Made the half-precision header OpenCL-independent --- include/clblast_half.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/include/clblast_half.h b/include/clblast_half.h index 3d77fdd9..b8de8537 100644 --- a/include/clblast_half.h +++ b/include/clblast_half.h @@ -18,13 +18,6 @@ #ifndef CLBLAST_HALF_H_ #define CLBLAST_HALF_H_ -// Includes the normal OpenCL C header -#if defined(__APPLE__) || defined(__MACOSX) - #include -#else - #include -#endif - // MSVC 2013 doesn't fully support C99 #ifdef _MSC_VER #define inline __inline @@ -34,6 +27,7 @@ // The host data-type for half-precision floating-point (16-bit) is based on the `cl_half` OpenCL // type, which is a typedef for unsigned short. +typedef unsigned short half; // 32-bit union for conversions typedef union ConversionBits_ { @@ -46,7 +40,7 @@ typedef union ConversionBits_ { // Converts a IEEE-compliant single-precision value to half-precision floating-point. 
This function // applies simple truncation (round toward zero, but with overflows set to infinity) as rounding // mode. -inline cl_half FloatToHalf(const float value) { +inline half FloatToHalf(const float value) { static const unsigned short base_table[512] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, @@ -107,7 +101,7 @@ inline cl_half FloatToHalf(const float value) { } // Converts a half-precision value to IEEE-compliant single-precision floating-point -inline float HalfToFloat(const cl_half value) { +inline float HalfToFloat(const half value) { static const unsigned int mantissa_table[2048] = { 0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34A00000, 0x34C00000, 0x34E00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000, 0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35A00000, 0x35A80000, 0x35B00000, 0x35B80000, 0x35C00000, 0x35C80000, 0x35D00000, 0x35D80000, 0x35E00000, 0x35E80000, 0x35F00000, 0x35F80000, -- cgit v1.2.3 From 44246053a595af533308fcd2761664212f28aaaf Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Mon, 9 Oct 2017 19:41:40 +0200 Subject: Removed include of clpp11.hpp in places other than utilities.hpp --- src/routines/common.hpp | 3 +-- src/utilities/clblast_exceptions.hpp | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/routines/common.hpp b/src/routines/common.hpp index 84ccd9d2..bf3b1762 100644 --- a/src/routines/common.hpp +++ b/src/routines/common.hpp @@ -19,8 +19,7 @@ #include #include -#include "clpp11.hpp" -#include "clblast.h" +#include "utilities/utilities.hpp" #include "database/database.hpp" namespace clblast { diff --git a/src/utilities/clblast_exceptions.hpp b/src/utilities/clblast_exceptions.hpp index 0d0033b6..a790be9c 100644 --- a/src/utilities/clblast_exceptions.hpp +++ b/src/utilities/clblast_exceptions.hpp @@ -16,8 +16,7 @@ #ifndef CLBLAST_EXCEPTIONS_H_ #define CLBLAST_EXCEPTIONS_H_ -#include "clpp11.hpp" -#include "clblast.h" +#include "utilities/utilities.hpp" namespace clblast { // ================================================================================================= -- cgit v1.2.3 From 9224da19ef384c1a7986587a682035905f63cf55 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Mon, 9 Oct 2017 20:06:25 +0200 Subject: Fixed the Python generator script w.r.t. 
the recent change of testing direct/in-direct GEMM kernels separately --- scripts/generator/generator/cpp.py | 15 ++++++++++----- scripts/generator/generator/datatype.py | 6 +++--- test/correctness/routines/level3/xgemm.cpp | 5 ----- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/scripts/generator/generator/cpp.py b/scripts/generator/generator/cpp.py index 964b8f3e..5fef3083 100644 --- a/scripts/generator/generator/cpp.py +++ b/scripts/generator/generator/cpp.py @@ -364,7 +364,9 @@ def performance_test(routine, level_string): found = False for flavour in routine.flavours: if flavour.precision_name == precision: - result += NL + " clblast::RunClient(beta)[0], reinterpret_cast(beta)[1]}" return "beta" - def test_template(self): + def test_template(self, extra_template_argument): """Returns the template as used in the correctness/performance tests""" buffer_type = "clblast::" + self.buffer_type if self.is_non_standard() else self.buffer_type beta_cpp = "clblast::" + self.beta_cpp if self.beta_cpp in [D_HALF, D_FLOAT2, D_DOUBLE2] else self.beta_cpp if self.buffer_type != self.beta_cpp: - return "<" + buffer_type + "," + self.beta_cpp + ">, " + buffer_type + ", " + beta_cpp - return "<" + buffer_type + ">, " + buffer_type + ", " + beta_cpp + return "<" + extra_template_argument + buffer_type + "," + self.beta_cpp + ">, " + buffer_type + ", " + beta_cpp + return "<" + extra_template_argument + buffer_type + ">, " + buffer_type + ", " + beta_cpp def is_complex(self, scalar): """Current scalar is complex""" diff --git a/test/correctness/routines/level3/xgemm.cpp b/test/correctness/routines/level3/xgemm.cpp index bdf57b36..351e538b 100644 --- a/test/correctness/routines/level3/xgemm.cpp +++ b/test/correctness/routines/level3/xgemm.cpp @@ -15,21 +15,16 @@ // Main function (not within the clblast namespace) int main(int argc, char *argv[]) { auto errors = size_t{0}; - - // Tests GEMM based on the 'in-direct' kernel errors += clblast::RunTests, float, float>(argc, argv, false, "SGEMM"); errors += clblast::RunTests, double, double>(argc, argv, true, "DGEMM"); errors += clblast::RunTests, clblast::float2, clblast::float2>(argc, argv, true, "CGEMM"); errors += clblast::RunTests, clblast::double2, clblast::double2>(argc, argv, true, "ZGEMM"); errors += clblast::RunTests, clblast::half, clblast::half>(argc, argv, true, "HGEMM"); - - // Tests GEMM based on the 'direct' kernel errors += clblast::RunTests, float, float>(argc, argv, true, "SGEMM"); errors += clblast::RunTests, double, double>(argc, argv, true, "DGEMM"); errors += clblast::RunTests, clblast::float2, clblast::float2>(argc, argv, true, "CGEMM"); errors += clblast::RunTests, clblast::double2, clblast::double2>(argc, argv, true, "ZGEMM"); errors += clblast::RunTests, clblast::half, clblast::half>(argc, argv, true, "HGEMM"); - if (errors > 0) { return 1; } else { return 0; } } -- cgit v1.2.3 From b901809345848b44442c787380b13db5e5156df0 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Wed, 11 Oct 2017 23:16:57 +0200 Subject: Added first (untested) version of a CUDA API --- CMakeLists.txt | 98 +- include/clblast_cuda.h | 643 +++++++++ scripts/generator/generator.py | 12 +- scripts/generator/generator/cpp.py | 22 +- scripts/generator/generator/routine.py | 28 +- src/api_common.cpp | 2 +- src/clblast_cuda.cpp | 2336 ++++++++++++++++++++++++++++++++ src/cupp11.hpp | 770 +++++++++++ src/utilities/buffer_test.hpp | 2 +- src/utilities/utilities.hpp | 9 +- 10 files changed, 3874 insertions(+), 48 deletions(-) create mode 100644 
include/clblast_cuda.h create mode 100644 src/clblast_cuda.cpp create mode 100644 src/cupp11.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 52accbd4..a5a41f35 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,6 +30,23 @@ option(TESTS "Enable compilation of the correctness tests" OFF) option(NETLIB "Enable compilation of the CBLAS Netlib API" OFF) option(CUBLAS "Enables performance comparison against cuBLAS on NVIDIA GPUs" OFF) +# Select between an OpenCL API (default) or a CUDA API (beta) +option(OPENCL "Build CLBlast with an OpenCL API (default)" ON) +option(CUDA "Build CLBlast with a CUDA API (beta)" OFF) +if(NOT OPENCL AND NOT CUDA) + message(FATAL_ERROR "No API selected, choose from OpenCL (-DOPENCL=ON) or CUDA (-DCUDA=ON)") +endif() +if(OPENCL AND CUDA) + message(FATAL_ERROR "Multiple APIs selected, choose either OpenCL (-DOPENCL=ON -DCUDA=OFF) or CUDA (-DCUDA=ON -DOPENCL=OFF)") +endif() +if(OPENCL) + message("-- Building CLBlast with OpenCL API (default)") + add_definitions(-DOPENCL_API) +elseif(CUDA) + message("-- Building CLBlast with CUDA API (beta)") + add_definitions(-DCUDA_API) +endif() + # Compile in verbose mode with additional diagnostic messages option(VERBOSE "Compile in verbose mode for additional diagnostic messages" OFF) if(VERBOSE) @@ -123,8 +140,18 @@ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CFLAGS}") # Package scripts location set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${clblast_SOURCE_DIR}/cmake/Modules/") -# Requires OpenCL. It is found through the included "FindOpenCL.cmake" in CMAKE_MODULE_PATH. -find_package(OpenCL REQUIRED) +if(OPENCL) + # Requires OpenCL. It is found through the included "FindOpenCL.cmake" in CMAKE_MODULE_PATH. + find_package(OpenCL REQUIRED) + set(API_LIBRARIES ${OPENCL_LIBRARIES}) + set(API_INCLUDE_DIRS ${OPENCL_INCLUDE_DIRS}) +elseif(CUDA) + # For CUDA, the "FindCUDA.cmake" is part of CMake + find_package(CUDA REQUIRED) + set(API_LIBRARIES cuda nvrtc) + set(API_INCLUDE_DIRS ${CUDA_INCLUDE_DIRS}) + link_directories(${CUDA_TOOLKIT_ROOT_DIR}/lib64) +endif() # Locates the CLTune library in case the tuners need to be compiled. "FindCLTune.cmake" is included. 
if(TUNERS) @@ -161,11 +188,6 @@ set(KERNELS copy_fast copy_pad transpose_fast transpose_pad xaxpy xdot xger xgemm xgemm_direct xgemv) set(DATABASES copy pad padtranspose transpose xaxpy xdot xgemm xgemm_direct xgemv xgemv_fast xgemv_fast_rot xger) -set(SAMPLE_PROGRAMS_CPP sgemm sgemm_batched) -set(SAMPLE_PROGRAMS_C sasum dgemv sgemm haxpy cache) -if(NETLIB) - set(SAMPLE_PROGRAMS_C ${SAMPLE_PROGRAMS_C} sgemm_netlib) -endif() set(LEVEL1_ROUTINES xswap xscal xcopy xaxpy xdot xdotu xdotc xnrm2 xasum xamax) set(LEVEL2_ROUTINES xgemv xgbmv xhemv xhbmv xhpmv xsymv xsbmv xspmv xtrmv xtbmv xtpmv xtrsv xger xgeru xgerc xher xhpr xher2 xhpr2 xsyr xspr xsyr2 xspr2) @@ -173,6 +195,16 @@ set(LEVEL3_ROUTINES xgemm xsymm xhemm xsyrk xherk xsyr2k xher2k xtrmm xtrsm) set(LEVELX_ROUTINES xomatcopy xim2col xaxpybatched xgemmbatched) set(ROUTINES ${LEVEL1_ROUTINES} ${LEVEL2_ROUTINES} ${LEVEL3_ROUTINES} ${LEVELX_ROUTINES}) set(PRECISIONS 32 64 3232 6464 16) +if(OPENCL) + set(SAMPLE_PROGRAMS_CPP sgemm sgemm_batched) + set(SAMPLE_PROGRAMS_C sasum dgemv sgemm haxpy cache) + if(NETLIB) + set(SAMPLE_PROGRAMS_C ${SAMPLE_PROGRAMS_C} sgemm_netlib) + endif() +elseif(CUDA) + set(SAMPLE_PROGRAMS_CPP ) + set(SAMPLE_PROGRAMS_C ) +endif() # ================================================================================================== @@ -184,14 +216,10 @@ set(SOURCES src/utilities/utilities.cpp src/api_common.cpp src/cache.cpp - src/clblast.cpp - src/clblast_c.cpp src/routine.cpp src/routines/levelx/xinvert.cpp # only source, don't include it as a test ) set(HEADERS # such that they can be discovered by IDEs such as CLion and Visual Studio - include/clblast.h - include/clblast_c.h include/clblast_half.h src/database/apple_cpu_fallback.hpp src/database/database.hpp @@ -209,13 +237,19 @@ set(HEADERS # such that they can be discovered by IDEs such as CLion and Visual src/utilities/msvc.hpp src/utilities/utilities.hpp src/cache.hpp - src/clpp11.hpp src/cxpp11_common.hpp src/routine.hpp ) -if(NETLIB) - set(SOURCES ${SOURCES} src/clblast_netlib_c.cpp) - set(HEADERS ${HEADERS} include/clblast_netlib_c.h) +if(OPENCL) + set(SOURCES ${SOURCES} src/clblast.cpp src/clblast_c.cpp) + set(HEADERS ${HEADERS} include/clblast.h include/clblast_c.h src/clpp11.hpp) + if(NETLIB) + set(SOURCES ${SOURCES} src/clblast_netlib_c.cpp) + set(HEADERS ${HEADERS} include/clblast_netlib_c.h) + endif() +elseif(CUDA) + set(SOURCES ${SOURCES} src/clblast_cuda.cpp) + set(HEADERS ${HEADERS} include/clblast_cuda.h src/cupp11.hpp) endif() foreach(ROUTINE ${LEVEL1_ROUTINES}) set(SOURCES ${SOURCES} src/routines/level1/${ROUTINE}.cpp) @@ -249,14 +283,14 @@ else(BUILD_SHARED_LIBS) add_library(clblast STATIC ${SOURCES} ${HEADERS}) endif() -target_link_libraries(clblast ${OPENCL_LIBRARIES}) +target_link_libraries(clblast ${API_LIBRARIES}) # Includes directories: CLBlast and OpenCL target_include_directories(clblast PUBLIC $ $ $ - ${OPENCL_INCLUDE_DIRS}) + ${API_INCLUDE_DIRS}) # Sets the proper __declspec(dllexport) keyword for Visual Studio when the library is built if(MSVC) @@ -267,11 +301,15 @@ endif() # Installs the library install(TARGETS clblast EXPORT CLBlast DESTINATION lib) -install(FILES include/clblast.h DESTINATION include) -install(FILES include/clblast_c.h DESTINATION include) install(FILES include/clblast_half.h DESTINATION include) -if(NETLIB) - install(FILES include/clblast_netlib_c.h DESTINATION include) +if(OPENCL) + install(FILES include/clblast.h DESTINATION include) + install(FILES include/clblast_c.h DESTINATION include) + if(NETLIB) + 
install(FILES include/clblast_netlib_c.h DESTINATION include) + endif() +elseif(CUDA) + install(FILES include/clblast_cuda.h DESTINATION include) endif() # Installs the config for find_package in dependent projects @@ -291,19 +329,21 @@ endif() if(SAMPLES) # Downloads the cl.hpp file from Khronos - file(DOWNLOAD https://www.khronos.org/registry/OpenCL/api/2.1/cl.hpp ${clblast_SOURCE_DIR}/samples/cl.hpp) + if(OPENCL) + file(DOWNLOAD https://www.khronos.org/registry/OpenCL/api/2.1/cl.hpp ${clblast_SOURCE_DIR}/samples/cl.hpp) + endif() # Adds sample programs (C++) foreach(SAMPLE ${SAMPLE_PROGRAMS_CPP}) add_executable(clblast_sample_${SAMPLE} samples/${SAMPLE}.cpp) - target_link_libraries(clblast_sample_${SAMPLE} clblast ${OPENCL_LIBRARIES}) + target_link_libraries(clblast_sample_${SAMPLE} clblast ${API_LIBRARIES}) install(TARGETS clblast_sample_${SAMPLE} DESTINATION bin) endforeach() # Adds sample programs (C) foreach(SAMPLE ${SAMPLE_PROGRAMS_C}) add_executable(clblast_sample_${SAMPLE}_c samples/${SAMPLE}.c) - target_link_libraries(clblast_sample_${SAMPLE}_c clblast ${OPENCL_LIBRARIES}) + target_link_libraries(clblast_sample_${SAMPLE}_c clblast ${API_LIBRARIES}) install(TARGETS clblast_sample_${SAMPLE}_c DESTINATION bin) endforeach() @@ -324,7 +364,7 @@ if(TUNERS) # Adds tuning executables foreach(KERNEL ${KERNELS}) add_executable(clblast_tuner_${KERNEL} ${TUNERS_COMMON} src/tuning/kernels/${KERNEL}.cpp) - target_link_libraries(clblast_tuner_${KERNEL} clblast ${CLTUNE_LIBRARIES} ${OPENCL_LIBRARIES}) + target_link_libraries(clblast_tuner_${KERNEL} clblast ${CLTUNE_LIBRARIES} ${API_LIBRARIES}) target_include_directories(clblast_tuner_${KERNEL} PUBLIC ${CLTUNE_INCLUDE_DIRS}) install(TARGETS clblast_tuner_${KERNEL} DESTINATION bin) endforeach() @@ -429,7 +469,7 @@ if(CLIENTS) test/routines/levelx/${ROUTINE}.hpp) endforeach() foreach(ROUTINE ${ROUTINES}) - target_link_libraries(clblast_client_${ROUTINE} clblast ${REF_LIBRARIES} ${OPENCL_LIBRARIES}) + target_link_libraries(clblast_client_${ROUTINE} clblast ${REF_LIBRARIES} ${API_LIBRARIES}) target_include_directories(clblast_client_${ROUTINE} PUBLIC ${clblast_SOURCE_DIR} ${REF_INCLUDES}) install(TARGETS clblast_client_${ROUTINE} DESTINATION bin) endforeach() @@ -481,7 +521,7 @@ if(TESTS) test/routines/levelx/${ROUTINE}.hpp) endforeach() foreach(ROUTINE ${ROUTINES}) - target_link_libraries(clblast_test_${ROUTINE} clblast ${REF_LIBRARIES} ${OPENCL_LIBRARIES}) + target_link_libraries(clblast_test_${ROUTINE} clblast ${REF_LIBRARIES} ${API_LIBRARIES}) install(TARGETS clblast_test_${ROUTINE} DESTINATION bin) target_include_directories(clblast_test_${ROUTINE} PUBLIC ${clblast_SOURCE_DIR} ${REF_INCLUDES}) add_test(clblast_test_${ROUTINE} clblast_test_${ROUTINE}) @@ -492,7 +532,7 @@ if(TESTS) foreach(MISC_TEST ${MISC_TESTS}) add_executable(clblast_test_${MISC_TEST} ${TESTS_COMMON} test/correctness/misc/${MISC_TEST}.cpp) - target_link_libraries(clblast_test_${MISC_TEST} clblast ${REF_LIBRARIES} ${OPENCL_LIBRARIES}) + target_link_libraries(clblast_test_${MISC_TEST} clblast ${REF_LIBRARIES} ${API_LIBRARIES}) target_include_directories(clblast_test_${MISC_TEST} PUBLIC $ ${clblast_SOURCE_DIR} ${REF_INCLUDES}) @@ -501,7 +541,7 @@ if(TESTS) # CLBlast diagnostics add_executable(clblast_test_diagnostics ${TESTS_COMMON} test/diagnostics.cpp) - target_link_libraries(clblast_test_diagnostics clblast ${REF_LIBRARIES} ${OPENCL_LIBRARIES}) + target_link_libraries(clblast_test_diagnostics clblast ${REF_LIBRARIES} ${API_LIBRARIES}) 
target_include_directories(clblast_test_diagnostics PUBLIC $ ${clblast_SOURCE_DIR} ${REF_INCLUDES}) diff --git a/include/clblast_cuda.h b/include/clblast_cuda.h new file mode 100644 index 00000000..c125c302 --- /dev/null +++ b/include/clblast_cuda.h @@ -0,0 +1,643 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file contains the special CUDA interface to the CLBlast BLAS routines. It also contains the +// definitions of the returned status codes and the layout and transpose types. This is the header +// users of the CUDA API of CLBlast should include and use. +// +// ================================================================================================= + +#ifndef CLBLAST_CLBLAST_CUDA_H_ +#define CLBLAST_CLBLAST_CUDA_H_ + +#include // For size_t +#include // For OverrideParameters function +#include // For OverrideParameters function + +// CUDA +#include // CUDA driver API +#include // NVIDIA runtime compilation API + +// Exports library functions under Windows when building a DLL. See also: +// https://msdn.microsoft.com/en-us/library/a90k134d.aspx +#if defined(_WIN32) && defined(CLBLAST_DLL) + #if defined(COMPILING_DLL) + #define PUBLIC_API __declspec(dllexport) + #else + #define PUBLIC_API __declspec(dllimport) + #endif +#else + #define PUBLIC_API +#endif + +namespace clblast { +// ================================================================================================= + +// Status codes. These codes can be returned by functions declared in this header file. The error +// codes match either the standard CUDA driver API error codes or the regular CLBlast error codes. +enum class StatusCode { + + // Status codes in common with the OpenCL standard + kSuccess = 0, // CUDA_SUCCESS + kInvalidLocalNumDimensions = -53, // CL_INVALID_WORK_DIMENSION: Too many thread dimensions + kInvalidLocalThreadsTotal = -54, // CL_INVALID_WORK_GROUP_SIZE: Too many threads in total + kInvalidLocalThreadsDim = -55, // CL_INVALID_WORK_ITEM_SIZE: ... 
or for a specific dimension + + // Status codes in common with the clBLAS library + kNotImplemented = -1024, // Routine or functionality not implemented yet + kInvalidMatrixA = -1022, // Matrix A is not a valid OpenCL buffer + kInvalidMatrixB = -1021, // Matrix B is not a valid OpenCL buffer + kInvalidMatrixC = -1020, // Matrix C is not a valid OpenCL buffer + kInvalidVectorX = -1019, // Vector X is not a valid OpenCL buffer + kInvalidVectorY = -1018, // Vector Y is not a valid OpenCL buffer + kInvalidDimension = -1017, // Dimensions M, N, and K have to be larger than zero + kInvalidLeadDimA = -1016, // LD of A is smaller than the matrix's first dimension + kInvalidLeadDimB = -1015, // LD of B is smaller than the matrix's first dimension + kInvalidLeadDimC = -1014, // LD of C is smaller than the matrix's first dimension + kInvalidIncrementX = -1013, // Increment of vector X cannot be zero + kInvalidIncrementY = -1012, // Increment of vector Y cannot be zero + kInsufficientMemoryA = -1011, // Matrix A's OpenCL buffer is too small + kInsufficientMemoryB = -1010, // Matrix B's OpenCL buffer is too small + kInsufficientMemoryC = -1009, // Matrix C's OpenCL buffer is too small + kInsufficientMemoryX = -1008, // Vector X's OpenCL buffer is too small + kInsufficientMemoryY = -1007, // Vector Y's OpenCL buffer is too small + + // Custom additional status codes for CLBlast + kInvalidBatchCount = -2049, // The batch count needs to be positive + kInvalidOverrideKernel = -2048, // Trying to override parameters for an invalid kernel + kMissingOverrideParameter = -2047, // Missing override parameter(s) for the target kernel + kInvalidLocalMemUsage = -2046, // Not enough local memory available on this device + kNoHalfPrecision = -2045, // Half precision (16-bits) not supported by the device + kNoDoublePrecision = -2044, // Double precision (64-bits) not supported by the device + kInvalidVectorScalar = -2043, // The unit-sized vector is not a valid OpenCL buffer + kInsufficientMemoryScalar = -2042, // The unit-sized vector's OpenCL buffer is too small + kDatabaseError = -2041, // Entry for the device was not found in the database + kUnknownError = -2040, // A catch-all error code representing an unspecified error + kUnexpectedError = -2039, // A catch-all error code representing an unexpected exception +}; + +// Matrix layout and transpose types +enum class Layout { kRowMajor = 101, kColMajor = 102 }; +enum class Transpose { kNo = 111, kYes = 112, kConjugate = 113 }; +enum class Triangle { kUpper = 121, kLower = 122 }; +enum class Diagonal { kNonUnit = 131, kUnit = 132 }; +enum class Side { kLeft = 141, kRight = 142 }; + +// Precision scoped enum (values in bits) +enum class Precision { kHalf = 16, kSingle = 32, kDouble = 64, + kComplexSingle = 3232, kComplexDouble = 6464, kAny = -1 }; + +// ================================================================================================= +// BLAS level-1 (vector-vector) routines +// ================================================================================================= + +// Generate givens plane rotation: SROTG/DROTG +template +StatusCode Rotg(CUdeviceptr sa_buffer, const size_t sa_offset, + CUdeviceptr sb_buffer, const size_t sb_offset, + CUdeviceptr sc_buffer, const size_t sc_offset, + CUdeviceptr ss_buffer, const size_t ss_offset, + CUstream* stream); + +// Generate modified givens plane rotation: SROTMG/DROTMG +template +StatusCode Rotmg(CUdeviceptr sd1_buffer, const size_t sd1_offset, + CUdeviceptr sd2_buffer, const size_t sd2_offset, + 
CUdeviceptr sx1_buffer, const size_t sx1_offset, + const CUdeviceptr sy1_buffer, const size_t sy1_offset, + CUdeviceptr sparam_buffer, const size_t sparam_offset, + CUstream* stream); + +// Apply givens plane rotation: SROT/DROT +template +StatusCode Rot(const size_t n, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + const T cos, + const T sin, + CUstream* stream); + +// Apply modified givens plane rotation: SROTM/DROTM +template +StatusCode Rotm(const size_t n, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr sparam_buffer, const size_t sparam_offset, + CUstream* stream); + +// Swap two vectors: SSWAP/DSWAP/CSWAP/ZSWAP/HSWAP +template +StatusCode Swap(const size_t n, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Vector scaling: SSCAL/DSCAL/CSCAL/ZSCAL/HSCAL +template +StatusCode Scal(const size_t n, + const T alpha, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Vector copy: SCOPY/DCOPY/CCOPY/ZCOPY/HCOPY +template +StatusCode Copy(const size_t n, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Vector-times-constant plus vector: SAXPY/DAXPY/CAXPY/ZAXPY/HAXPY +template +StatusCode Axpy(const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Dot product of two vectors: SDOT/DDOT/HDOT +template +StatusCode Dot(const size_t n, + CUdeviceptr dot_buffer, const size_t dot_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Dot product of two complex vectors: CDOTU/ZDOTU +template +StatusCode Dotu(const size_t n, + CUdeviceptr dot_buffer, const size_t dot_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Dot product of two complex vectors, one conjugated: CDOTC/ZDOTC +template +StatusCode Dotc(const size_t n, + CUdeviceptr dot_buffer, const size_t dot_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Euclidian norm of a vector: SNRM2/DNRM2/ScNRM2/DzNRM2/HNRM2 +template +StatusCode Nrm2(const size_t n, + CUdeviceptr nrm2_buffer, const size_t nrm2_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Absolute sum of values in a vector: SASUM/DASUM/ScASUM/DzASUM/HASUM +template +StatusCode Asum(const size_t n, + CUdeviceptr asum_buffer, const size_t asum_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Sum of values in a vector (non-BLAS function): SSUM/DSUM/ScSUM/DzSUM/HSUM +template +StatusCode Sum(const size_t n, + CUdeviceptr sum_buffer, const size_t sum_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Index of absolute maximum value in a 
vector: iSAMAX/iDAMAX/iCAMAX/iZAMAX/iHAMAX +template +StatusCode Amax(const size_t n, + CUdeviceptr imax_buffer, const size_t imax_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Index of absolute minimum value in a vector (non-BLAS function): iSAMIN/iDAMIN/iCAMIN/iZAMIN/iHAMIN +template +StatusCode Amin(const size_t n, + CUdeviceptr imin_buffer, const size_t imin_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Index of maximum value in a vector (non-BLAS function): iSMAX/iDMAX/iCMAX/iZMAX/iHMAX +template +StatusCode Max(const size_t n, + CUdeviceptr imax_buffer, const size_t imax_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Index of minimum value in a vector (non-BLAS function): iSMIN/iDMIN/iCMIN/iZMIN/iHMIN +template +StatusCode Min(const size_t n, + CUdeviceptr imin_buffer, const size_t imin_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// ================================================================================================= +// BLAS level-2 (matrix-vector) routines +// ================================================================================================= + +// General matrix-vector multiplication: SGEMV/DGEMV/CGEMV/ZGEMV/HGEMV +template +StatusCode Gemv(const Layout layout, const Transpose a_transpose, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// General banded matrix-vector multiplication: SGBMV/DGBMV/CGBMV/ZGBMV/HGBMV +template +StatusCode Gbmv(const Layout layout, const Transpose a_transpose, + const size_t m, const size_t n, const size_t kl, const size_t ku, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Hermitian matrix-vector multiplication: CHEMV/ZHEMV +template +StatusCode Hemv(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Hermitian banded matrix-vector multiplication: CHBMV/ZHBMV +template +StatusCode Hbmv(const Layout layout, const Triangle triangle, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Hermitian packed matrix-vector multiplication: CHPMV/ZHPMV +template +StatusCode Hpmv(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr ap_buffer, const size_t ap_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// 
Symmetric matrix-vector multiplication: SSYMV/DSYMV/HSYMV +template +StatusCode Symv(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Symmetric banded matrix-vector multiplication: SSBMV/DSBMV/HSBMV +template +StatusCode Sbmv(const Layout layout, const Triangle triangle, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Symmetric packed matrix-vector multiplication: SSPMV/DSPMV/HSPMV +template +StatusCode Spmv(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr ap_buffer, const size_t ap_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream); + +// Triangular matrix-vector multiplication: STRMV/DTRMV/CTRMV/ZTRMV/HTRMV +template +StatusCode Trmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Triangular banded matrix-vector multiplication: STBMV/DTBMV/CTBMV/ZTBMV/HTBMV +template +StatusCode Tbmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, const size_t k, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Triangular packed matrix-vector multiplication: STPMV/DTPMV/CTPMV/ZTPMV/HTPMV +template +StatusCode Tpmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, + const CUdeviceptr ap_buffer, const size_t ap_offset, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Solves a triangular system of equations: STRSV/DTRSV/CTRSV/ZTRSV +template +StatusCode Trsv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Solves a banded triangular system of equations: STBSV/DTBSV/CTBSV/ZTBSV +template +StatusCode Tbsv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, const size_t k, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream); + +// Solves a packed triangular system of equations: STPSV/DTPSV/CTPSV/ZTPSV +template +StatusCode Tpsv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, + const CUdeviceptr ap_buffer, const size_t ap_offset, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + 
CUstream* stream); + +// General rank-1 matrix update: SGER/DGER/HGER +template +StatusCode Ger(const Layout layout, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream); + +// General rank-1 complex matrix update: CGERU/ZGERU +template +StatusCode Geru(const Layout layout, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream); + +// General rank-1 complex conjugated matrix update: CGERC/ZGERC +template +StatusCode Gerc(const Layout layout, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream); + +// Hermitian rank-1 matrix update: CHER/ZHER +template +StatusCode Her(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream); + +// Hermitian packed rank-1 matrix update: CHPR/ZHPR +template +StatusCode Hpr(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr ap_buffer, const size_t ap_offset, + CUstream* stream); + +// Hermitian rank-2 matrix update: CHER2/ZHER2 +template +StatusCode Her2(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream); + +// Hermitian packed rank-2 matrix update: CHPR2/ZHPR2 +template +StatusCode Hpr2(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr ap_buffer, const size_t ap_offset, + CUstream* stream); + +// Symmetric rank-1 matrix update: SSYR/DSYR/HSYR +template +StatusCode Syr(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream); + +// Symmetric packed rank-1 matrix update: SSPR/DSPR/HSPR +template +StatusCode Spr(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr ap_buffer, const size_t ap_offset, + CUstream* stream); + +// Symmetric rank-2 matrix update: SSYR2/DSYR2/HSYR2 +template +StatusCode Syr2(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const 
size_t a_offset, const size_t a_ld, + CUstream* stream); + +// Symmetric packed rank-2 matrix update: SSPR2/DSPR2/HSPR2 +template +StatusCode Spr2(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr ap_buffer, const size_t ap_offset, + CUstream* stream); + +// ================================================================================================= +// BLAS level-3 (matrix-matrix) routines +// ================================================================================================= + +// General matrix-matrix multiplication: SGEMM/DGEMM/CGEMM/ZGEMM/HGEMM +template +StatusCode Gemm(const Layout layout, const Transpose a_transpose, const Transpose b_transpose, + const size_t m, const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream); + +// Symmetric matrix-matrix multiplication: SSYMM/DSYMM/CSYMM/ZSYMM/HSYMM +template +StatusCode Symm(const Layout layout, const Side side, const Triangle triangle, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream); + +// Hermitian matrix-matrix multiplication: CHEMM/ZHEMM +template +StatusCode Hemm(const Layout layout, const Side side, const Triangle triangle, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream); + +// Rank-K update of a symmetric matrix: SSYRK/DSYRK/CSYRK/ZSYRK/HSYRK +template +StatusCode Syrk(const Layout layout, const Triangle triangle, const Transpose a_transpose, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream); + +// Rank-K update of a hermitian matrix: CHERK/ZHERK +template +StatusCode Herk(const Layout layout, const Triangle triangle, const Transpose a_transpose, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream); + +// Rank-2K update of a symmetric matrix: SSYR2K/DSYR2K/CSYR2K/ZSYR2K/HSYR2K +template +StatusCode Syr2k(const Layout layout, const Triangle triangle, const Transpose ab_transpose, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream); + +// Rank-2K update of a hermitian matrix: CHER2K/ZHER2K +template +StatusCode Her2k(const Layout layout, const Triangle triangle, const Transpose ab_transpose, + const size_t n, const size_t 
k,
+                 const T alpha,
+                 const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+                 const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+                 const U beta,
+                 CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld,
+                 CUstream* stream);
+
+// Triangular matrix-matrix multiplication: STRMM/DTRMM/CTRMM/ZTRMM/HTRMM
+template <typename T>
+StatusCode Trmm(const Layout layout, const Side side, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
+                const size_t m, const size_t n,
+                const T alpha,
+                const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+                CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+                CUstream* stream);
+
+// Solves a triangular system of equations: STRSM/DTRSM/CTRSM/ZTRSM
+template <typename T>
+StatusCode Trsm(const Layout layout, const Side side, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal,
+                const size_t m, const size_t n,
+                const T alpha,
+                const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+                CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+                CUstream* stream);
+
+// =================================================================================================
+// Extra non-BLAS routines (level-X)
+// =================================================================================================
+
+// Scaling and out-place transpose/copy (non-BLAS function): SOMATCOPY/DOMATCOPY/COMATCOPY/ZOMATCOPY/HOMATCOPY
+template <typename T>
+StatusCode Omatcopy(const Layout layout, const Transpose a_transpose,
+                    const size_t m, const size_t n,
+                    const T alpha,
+                    const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld,
+                    CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld,
+                    CUstream* stream);
+
+// Im2col function (non-BLAS function): SIM2COL/DIM2COL/CIM2COL/ZIM2COL/HIM2COL
+template <typename T>
+StatusCode Im2col(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w,
+                  const CUdeviceptr im_buffer, const size_t im_offset,
+                  CUdeviceptr col_buffer, const size_t col_offset,
+                  CUstream* stream);
+
+// Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED
+template <typename T>
+StatusCode AxpyBatched(const size_t n,
+                       const T *alphas,
+                       const CUdeviceptr x_buffer, const size_t *x_offsets, const size_t x_inc,
+                       CUdeviceptr y_buffer, const size_t *y_offsets, const size_t y_inc,
+                       const size_t batch_count,
+                       CUstream* stream);
+
+// Batched version of GEMM: SGEMMBATCHED/DGEMMBATCHED/CGEMMBATCHED/ZGEMMBATCHED/HGEMMBATCHED
+template <typename T>
+StatusCode GemmBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
+                       const size_t m, const size_t n, const size_t k,
+                       const T *alphas,
+                       const CUdeviceptr a_buffer, const size_t *a_offsets, const size_t a_ld,
+                       const CUdeviceptr b_buffer, const size_t *b_offsets, const size_t b_ld,
+                       const T *betas,
+                       CUdeviceptr c_buffer, const size_t *c_offsets, const size_t c_ld,
+                       const size_t batch_count,
+                       CUstream* stream);
+
+// =================================================================================================
+
+// CLBlast stores binaries of compiled kernels into a cache in case the same kernel is used later on
+// for the same device. This cache can be cleared to free up system memory or in case of debugging.
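Editor's note, not part of the patch: the cache-management and tuning entry points declared just below (ClearCache, FillCache and OverrideParameters) are easiest to read from the caller's side. The following is a minimal usage sketch assuming the CUDA driver API has already been initialised and `device` holds a valid CUdevice; the kernel name "Xaxpy" and the "WGS" parameter value are illustrative placeholders rather than validated tuning settings.

  #include <string>
  #include <unordered_map>
  #include <cuda.h>
  #include <clblast_cuda.h>

  clblast::StatusCode WarmUpAndTune(const CUdevice device) {
    // Pre-compile all kernels for this device so that later routine calls run at full speed.
    auto status = clblast::FillCache(device);
    if (status != clblast::StatusCode::kSuccess) { return status; }

    // Override the tuning parameters of one kernel for this device/precision combination;
    // the next call to the affected routine re-compiles with these values.
    const auto parameters = std::unordered_map<std::string, size_t>{{"WGS", 64}};
    status = clblast::OverrideParameters(device, "Xaxpy", clblast::Precision::kSingle, parameters);
    if (status != clblast::StatusCode::kSuccess) { return status; }

    // Release the stored binaries again, e.g. to free host memory or when debugging.
    return clblast::ClearCache();
  }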
+StatusCode PUBLIC_API ClearCache();
+
+// The cache can also be pre-initialized for a specific device with all possible CLBlast kernels.
+// Further CLBlast routine calls will then run at maximum speed.
+StatusCode PUBLIC_API FillCache(const CUdevice device);
+
+// =================================================================================================
+
+// Overrides tuning parameters for a specific device-precision-kernel combination. The next time
+// the target routine is called it will re-compile and use the new parameters from then on.
+StatusCode PUBLIC_API OverrideParameters(const CUdevice device, const std::string &kernel_name,
+                                         const Precision precision,
+                                         const std::unordered_map<std::string,size_t> &parameters);
+
+// =================================================================================================
+
+} // namespace clblast
+
+// CLBLAST_CLBLAST_CUDA_H_
+#endif
diff --git a/scripts/generator/generator.py b/scripts/generator/generator.py
index 0d34d7fe..520e3fc8 100755
--- a/scripts/generator/generator.py
+++ b/scripts/generator/generator.py
@@ -12,6 +12,8 @@
 # clblast.cpp
 # clblast_c.h
 # clblast_c.cpp
+# clblast_cuda.h
+# clblast_cuda.cpp
 # clblast_netlib_c.h
 # clblast_netlib_c.cpp
 # wrapper_clblas.h
@@ -41,9 +43,11 @@ FILES = [
     "/test/wrapper_cublas.hpp",
     "/include/clblast_netlib_c.h",
     "/src/clblast_netlib_c.cpp",
+    "/include/clblast_cuda.h",
+    "/src/clblast_cuda.cpp",
 ]
-HEADER_LINES = [122, 21, 126, 24, 29, 41, 29, 65, 32]
-FOOTER_LINES = [25, 3, 27, 38, 6, 6, 6, 9, 2]
+HEADER_LINES = [122, 21, 126, 24, 29, 41, 29, 65, 32, 94, 21]
+FOOTER_LINES = [25, 3, 27, 38, 6, 6, 6, 9, 2, 25, 3]
 HEADER_LINES_DOC = 0
 FOOTER_LINES_DOC = 63
@@ -224,6 +228,10 @@ def main(argv):
                 if i == 8:
                     if not routine.batched:
                         body += cpp.clblast_netlib_c_cc(routine)
+                if i == 9:
+                    body += cpp.clblast_h(routine, cuda=True)
+                if i == 10:
+                    body += cpp.clblast_cc(routine, cuda=True)
             f.write("".join(file_header))
             f.write(body)
             f.write("".join(file_footer))
diff --git a/scripts/generator/generator/cpp.py b/scripts/generator/generator/cpp.py
index 5fef3083..f1ee1959 100644
--- a/scripts/generator/generator/cpp.py
+++ b/scripts/generator/generator/cpp.py
@@ -36,19 +36,19 @@ HEADER = NL + SEPARATOR + """
 """ + SEPARATOR + NL
 
 
-def clblast_h(routine):
+def clblast_h(routine, cuda=False):
     """The C++ API header (.h)"""
     result = NL + "// " + routine.description + ": " + routine.short_names() + NL
-    result += routine.routine_header_cpp(12, " = nullptr") + ";" + NL
+    result += routine.routine_header_cpp(12, " = nullptr", cuda) + ";" + NL
     return result
 
 
-def clblast_cc(routine):
+def clblast_cc(routine, cuda=False):
     """The C++ API implementation (.cpp)"""
     indent1 = " " * (15 + routine.length())
     result = NL + "// " + routine.description + ": " + routine.short_names() + NL
     if routine.implemented:
-        result += routine.routine_header_cpp(12, "") + " {" + NL
+        result += routine.routine_header_cpp(12, "", cuda) + " {" + NL
         result += " try {" + NL
         result += " auto queue_cpp = Queue(*queue);" + NL
         result += " auto routine = X" + routine.plain_name() + "<" + routine.template.template + ">(queue_cpp, event);" + NL
@@ -60,14 +60,22 @@ def clblast_cc(routine):
         result += " return StatusCode::kSuccess;" + NL
         result += " } catch (...) 
{ return DispatchException(); }" + NL else: - result += routine.routine_header_type_cpp(12) + " {" + NL + result += routine.routine_header_type_cpp(12, cuda) + " {" + NL result += " return StatusCode::kNotImplemented;" + NL result += "}" + NL for flavour in routine.flavours: indent2 = " " * (34 + routine.length() + len(flavour.template)) result += "template StatusCode PUBLIC_API " + routine.capitalized_name() + "<" + flavour.template + ">(" - result += ("," + NL + indent2).join([a for a in routine.arguments_type(flavour)]) - result += "," + NL + indent2 + "cl_command_queue*, cl_event*);" + NL + arguments = routine.arguments_type(flavour) + if cuda: + arguments = [a.replace("cl_mem", "CUdeviceptr") for a in arguments] + result += ("," + NL + indent2).join([a for a in arguments]) + result += "," + NL + indent2 + if cuda: + result += "CUstream*" + else: + result += "cl_command_queue*, cl_event*" + result += ");" + NL return result diff --git a/scripts/generator/generator/routine.py b/scripts/generator/generator/routine.py index cef7db87..c3c1f775 100644 --- a/scripts/generator/generator/routine.py +++ b/scripts/generator/generator/routine.py @@ -802,22 +802,38 @@ class Routine: """Retrieves a list of routine requirements for documentation""" return self.requirements - def routine_header_cpp(self, spaces, default_event): + def routine_header_cpp(self, spaces, default_event, cuda=False): """Retrieves the C++ templated definition for a routine""" indent = " " * (spaces + self.length()) + arguments = self.arguments_def(self.template) + if cuda: + arguments = [a.replace("cl_mem", "CUdeviceptr") for a in arguments] result = "template <" + self.template.name + ">\n" result += "StatusCode " + self.capitalized_name() + "(" - result += (",\n" + indent).join([a for a in self.arguments_def(self.template)]) - result += ",\n" + indent + "cl_command_queue* queue, cl_event* event" + default_event + ")" + result += (",\n" + indent).join([a for a in arguments]) + result += ",\n" + indent + if cuda: + result += "CUstream* stream" + else: + result += "cl_command_queue* queue, cl_event* event" + default_event + result += ")" return result - def routine_header_type_cpp(self, spaces): + def routine_header_type_cpp(self, spaces, cuda=False): """As above, but now without variable names""" indent = " " * (spaces + self.length()) + arguments = self.arguments_type(self.template) + if cuda: + arguments = [a.replace("cl_mem", "CUdeviceptr") for a in arguments] result = "template <" + self.template.name + ">\n" result += "StatusCode " + self.capitalized_name() + "(" - result += (",\n" + indent).join([a for a in self.arguments_type(self.template)]) - result += ",\n" + indent + "cl_command_queue*, cl_event*)" + result += (",\n" + indent).join([a for a in arguments]) + result += ",\n" + indent + if cuda: + result += "CUstream* stream" + else: + result += "cl_command_queue*, cl_event*" + result += ")" return result def routine_header_c(self, flavour, spaces, extra_qualifier): diff --git a/src/api_common.cpp b/src/api_common.cpp index aa7e2b0f..0d387cd9 100644 --- a/src/api_common.cpp +++ b/src/api_common.cpp @@ -12,9 +12,9 @@ #include +#include "utilities/utilities.hpp" #include "cache.hpp" #include "routines/routines.hpp" -#include "clblast.h" namespace clblast { // ================================================================================================= diff --git a/src/clblast_cuda.cpp b/src/clblast_cuda.cpp new file mode 100644 index 00000000..5f30d023 --- /dev/null +++ b/src/clblast_cuda.cpp @@ -0,0 +1,2336 @@ + 
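Editor's note, not part of the patch: the cuda=True path added to the generator above only changes the routine signatures, as the routine.py diff shows, by swapping cl_mem buffers for CUdeviceptr and the trailing cl_command_queue*/cl_event* pair for a single CUstream*. As a concrete illustration, the AXPY declaration is shown below in both flavours; the CUDA form matches the generated clblast_cuda.h/.cpp that follow, while the OpenCL form is quoted from the existing clblast.h and should be checked against it.

  // OpenCL API (clblast.h):
  template <typename T>
  StatusCode Axpy(const size_t n, const T alpha,
                  const cl_mem x_buffer, const size_t x_offset, const size_t x_inc,
                  cl_mem y_buffer, const size_t y_offset, const size_t y_inc,
                  cl_command_queue* queue, cl_event* event = nullptr);

  // CUDA API (clblast_cuda.h), as emitted with cuda=True:
  template <typename T>
  StatusCode Axpy(const size_t n, const T alpha,
                  const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc,
                  CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc,
                  CUstream* stream);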
+// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file implements all the BLAS API calls (CUDA version). In all cases, it does not much more +// than creating a new object of the appropriate type, and calling the main routine on that object. +// It forwards all status codes to the caller. +// +// ================================================================================================= + +#include + +#include "routines/routines.hpp" +#include "clblast_cuda.h" + +namespace clblast { + +// ================================================================================================= +// BLAS level-1 (vector-vector) routines +// ================================================================================================= + +// Generate givens plane rotation: SROTG/DROTG +template +StatusCode Rotg(CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream* stream) { + return StatusCode::kNotImplemented; +} +template StatusCode PUBLIC_API Rotg(CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Rotg(CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); + +// Generate modified givens plane rotation: SROTMG/DROTMG +template +StatusCode Rotmg(CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream* stream) { + return StatusCode::kNotImplemented; +} +template StatusCode PUBLIC_API Rotmg(CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Rotmg(CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); + +// Apply givens plane rotation: SROT/DROT +template +StatusCode Rot(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + const T, + const T, + CUstream* stream) { + return StatusCode::kNotImplemented; +} +template StatusCode PUBLIC_API Rot(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + const float, + const float, + CUstream*); +template StatusCode PUBLIC_API Rot(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + const double, + const double, + CUstream*); + +// Apply modified givens plane rotation: SROTM/DROTM +template +StatusCode Rotm(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream* stream) { + return StatusCode::kNotImplemented; +} +template StatusCode PUBLIC_API Rotm(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Rotm(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, 
const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); + +// Swap two vectors: SSWAP/DSWAP/CSWAP/ZSWAP/HSWAP +template +StatusCode Swap(const size_t n, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xswap(queue_cpp, event); + routine.DoSwap(n, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Swap(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Swap(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Swap(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Swap(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Swap(const size_t, + CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Vector scaling: SSCAL/DSCAL/CSCAL/ZSCAL/HSCAL +template +StatusCode Scal(const size_t n, + const T alpha, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xscal(queue_cpp, event); + routine.DoScal(n, + alpha, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Scal(const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Scal(const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Scal(const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Scal(const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Scal(const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Vector copy: SCOPY/DCOPY/CCOPY/ZCOPY/HCOPY +template +StatusCode Copy(const size_t n, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xcopy(queue_cpp, event); + routine.DoCopy(n, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Copy(const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Copy(const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Copy(const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Copy(const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Copy(const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Vector-times-constant plus vector: SAXPY/DAXPY/CAXPY/ZAXPY/HAXPY +template +StatusCode Axpy(const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xaxpy(queue_cpp, event); + routine.DoAxpy(n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Axpy(const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Axpy(const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Axpy(const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Axpy(const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Axpy(const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Dot product of two vectors: SDOT/DDOT/HDOT +template +StatusCode Dot(const size_t n, + CUdeviceptr dot_buffer, const size_t dot_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xdot(queue_cpp, event); + routine.DoDot(n, + Buffer(dot_buffer), dot_offset, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Dot(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Dot(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Dot(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Dot product of two complex vectors: CDOTU/ZDOTU +template +StatusCode Dotu(const size_t n, + CUdeviceptr dot_buffer, const size_t dot_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xdotu(queue_cpp, event); + routine.DoDotu(n, + Buffer(dot_buffer), dot_offset, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Dotu(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Dotu(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Dot product of two complex vectors, one conjugated: CDOTC/ZDOTC +template +StatusCode Dotc(const size_t n, + CUdeviceptr dot_buffer, const size_t dot_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xdotc(queue_cpp, event); + routine.DoDotc(n, + Buffer(dot_buffer), dot_offset, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Dotc(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Dotc(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Euclidian norm of a vector: SNRM2/DNRM2/ScNRM2/DzNRM2/HNRM2 +template +StatusCode Nrm2(const size_t n, + CUdeviceptr nrm2_buffer, const size_t nrm2_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xnrm2(queue_cpp, event); + routine.DoNrm2(n, + Buffer(nrm2_buffer), nrm2_offset, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Nrm2(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Nrm2(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Nrm2(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Nrm2(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Nrm2(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Absolute sum of values in a vector: SASUM/DASUM/ScASUM/DzASUM/HASUM +template +StatusCode Asum(const size_t n, + CUdeviceptr asum_buffer, const size_t asum_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xasum(queue_cpp, event); + routine.DoAsum(n, + Buffer(asum_buffer), asum_offset, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Asum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Asum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Asum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Asum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Asum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Sum of values in a vector (non-BLAS function): SSUM/DSUM/ScSUM/DzSUM/HSUM +template +StatusCode Sum(const size_t n, + CUdeviceptr sum_buffer, const size_t sum_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xsum(queue_cpp, event); + routine.DoSum(n, + Buffer(sum_buffer), sum_offset, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Sum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Sum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Sum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Sum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Sum(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Index of absolute maximum value in a vector: iSAMAX/iDAMAX/iCAMAX/iZAMAX/iHAMAX +template +StatusCode Amax(const size_t n, + CUdeviceptr imax_buffer, const size_t imax_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xamax(queue_cpp, event); + routine.DoAmax(n, + Buffer(imax_buffer), imax_offset, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Amax(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Amax(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Amax(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Amax(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Amax(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Index of absolute minimum value in a vector (non-BLAS function): iSAMIN/iDAMIN/iCAMIN/iZAMIN/iHAMIN +template +StatusCode Amin(const size_t n, + CUdeviceptr imin_buffer, const size_t imin_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xamin(queue_cpp, event); + routine.DoAmin(n, + Buffer(imin_buffer), imin_offset, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Amin(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Amin(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Amin(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Amin(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Amin(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Index of maximum value in a vector (non-BLAS function): iSMAX/iDMAX/iCMAX/iZMAX/iHMAX +template +StatusCode Max(const size_t n, + CUdeviceptr imax_buffer, const size_t imax_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xmax(queue_cpp, event); + routine.DoMax(n, + Buffer(imax_buffer), imax_offset, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Max(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Max(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Max(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Max(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Max(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Index of minimum value in a vector (non-BLAS function): iSMIN/iDMIN/iCMIN/iZMIN/iHMIN +template +StatusCode Min(const size_t n, + CUdeviceptr imin_buffer, const size_t imin_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xmin(queue_cpp, event); + routine.DoMin(n, + Buffer(imin_buffer), imin_offset, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Min(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Min(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Min(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Min(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Min(const size_t, + CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUstream*); + +// ================================================================================================= +// BLAS level-2 (matrix-vector) routines +// ================================================================================================= + +// General matrix-vector multiplication: SGEMV/DGEMV/CGEMV/ZGEMV/HGEMV +template +StatusCode Gemv(const Layout layout, const Transpose a_transpose, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xgemv(queue_cpp, event); + routine.DoGemv(layout, a_transpose, + m, n, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc, + beta, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, + const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// General banded matrix-vector multiplication: SGBMV/DGBMV/CGBMV/ZGBMV/HGBMV +template +StatusCode Gbmv(const Layout layout, const Transpose a_transpose, + const size_t m, const size_t n, const size_t kl, const size_t ku, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, 
const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xgbmv(queue_cpp, event); + routine.DoGbmv(layout, a_transpose, + m, n, kl, ku, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc, + beta, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, + const size_t, const size_t, const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, + const size_t, const size_t, const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, + const size_t, const size_t, const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, + const size_t, const size_t, const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, + const size_t, const size_t, const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Hermitian matrix-vector multiplication: CHEMV/ZHEMV +template +StatusCode Hemv(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xhemv(queue_cpp, event); + routine.DoHemv(layout, triangle, + n, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc, + beta, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Hemv(const Layout, const Triangle, + const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Hemv(const Layout, const Triangle, + const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Hermitian banded matrix-vector multiplication: CHBMV/ZHBMV +template +StatusCode Hbmv(const Layout layout, const Triangle triangle, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xhbmv(queue_cpp, event); + routine.DoHbmv(layout, triangle, + n, k, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc, + beta, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Hbmv(const Layout, const Triangle, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Hbmv(const Layout, const Triangle, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Hermitian packed matrix-vector multiplication: CHPMV/ZHPMV +template +StatusCode Hpmv(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr ap_buffer, const size_t ap_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xhpmv(queue_cpp, event); + routine.DoHpmv(layout, triangle, + n, + alpha, + Buffer(ap_buffer), ap_offset, + Buffer(x_buffer), x_offset, x_inc, + beta, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Hpmv(const Layout, const Triangle, + const size_t, + const float2, + const CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Hpmv(const Layout, const Triangle, + const size_t, + const double2, + const CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Symmetric matrix-vector multiplication: SSYMV/DSYMV/HSYMV +template +StatusCode Symv(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xsymv(queue_cpp, event); + routine.DoSymv(layout, triangle, + n, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc, + beta, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Symv(const Layout, const Triangle, + const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Symv(const Layout, const Triangle, + const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Symv(const Layout, const Triangle, + const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Symmetric banded matrix-vector multiplication: SSBMV/DSBMV/HSBMV +template +StatusCode Sbmv(const Layout layout, const Triangle triangle, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xsbmv(queue_cpp, event); + routine.DoSbmv(layout, triangle, + n, k, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc, + beta, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Sbmv(const Layout, const Triangle, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Sbmv(const Layout, const Triangle, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Sbmv(const Layout, const Triangle, + const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Symmetric packed matrix-vector multiplication: SSPMV/DSPMV/HSPMV +template +StatusCode Spmv(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr ap_buffer, const size_t ap_offset, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xspmv(queue_cpp, event); + routine.DoSpmv(layout, triangle, + n, + alpha, + Buffer(ap_buffer), ap_offset, + Buffer(x_buffer), x_offset, x_inc, + beta, + Buffer(y_buffer), y_offset, y_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Spmv(const Layout, const Triangle, + const size_t, + const float, + const CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Spmv(const Layout, const Triangle, + const size_t, + const double, + const CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Spmv(const Layout, const Triangle, + const size_t, + const half, + const CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Triangular matrix-vector multiplication: STRMV/DTRMV/CTRMV/ZTRMV/HTRMV +template +StatusCode Trmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xtrmv(queue_cpp, event); + routine.DoTrmv(layout, triangle, a_transpose, diagonal, + n, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Triangular banded matrix-vector multiplication: STBMV/DTBMV/CTBMV/ZTBMV/HTBMV +template +StatusCode Tbmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, const size_t k, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xtbmv(queue_cpp, event); + routine.DoTbmv(layout, triangle, a_transpose, diagonal, + n, k, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Triangular packed matrix-vector multiplication: STPMV/DTPMV/CTPMV/ZTPMV/HTPMV +template +StatusCode Tpmv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, + const CUdeviceptr ap_buffer, const size_t ap_offset, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xtpmv(queue_cpp, event); + routine.DoTpmv(layout, triangle, a_transpose, diagonal, + n, + Buffer(ap_buffer), ap_offset, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Solves a triangular system of equations: STRSV/DTRSV/CTRSV/ZTRSV +template +StatusCode Trsv(const Layout layout, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t n, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xtrsv(queue_cpp, event); + routine.DoTrsv(layout, triangle, a_transpose, diagonal, + n, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Trsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Solves a banded triangular system of equations: STBSV/DTBSV/CTBSV/ZTBSV +template +StatusCode Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream* stream) { + return StatusCode::kNotImplemented; +} +template StatusCode PUBLIC_API Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tbsv(const Layout, const Triangle, const Transpose, 
const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Solves a packed triangular system of equations: STPSV/DTPSV/CTPSV/ZTPSV +template +StatusCode Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream* stream) { + return StatusCode::kNotImplemented; +} +template StatusCode PUBLIC_API Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, + const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// General rank-1 matrix update: SGER/DGER/HGER +template +StatusCode Ger(const Layout layout, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xger(queue_cpp, event); + routine.DoGer(layout, + m, n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc, + Buffer(a_buffer), a_offset, a_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Ger(const Layout, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Ger(const Layout, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Ger(const Layout, + const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// General rank-1 complex matrix update: CGERU/ZGERU +template +StatusCode Geru(const Layout layout, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xgeru(queue_cpp, event); + routine.DoGeru(layout, + m, n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc, + Buffer(a_buffer), a_offset, a_ld); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Geru(const Layout, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Geru(const Layout, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// General rank-1 complex conjugated matrix update: CGERC/ZGERC +template +StatusCode Gerc(const Layout layout, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xgerc(queue_cpp, event); + routine.DoGerc(layout, + m, n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc, + Buffer(a_buffer), a_offset, a_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Gerc(const Layout, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gerc(const Layout, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Hermitian rank-1 matrix update: CHER/ZHER +template +StatusCode Her(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xher,T>(queue_cpp, event); + routine.DoHer(layout, triangle, + n, + alpha, + Buffer>(x_buffer), x_offset, x_inc, + Buffer>(a_buffer), a_offset, a_ld); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Her(const Layout, const Triangle, + const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Her(const Layout, const Triangle, + const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Hermitian packed rank-1 matrix update: CHPR/ZHPR +template +StatusCode Hpr(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr ap_buffer, const size_t ap_offset, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xhpr,T>(queue_cpp, event); + routine.DoHpr(layout, triangle, + n, + alpha, + Buffer>(x_buffer), x_offset, x_inc, + Buffer>(ap_buffer), ap_offset); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Hpr(const Layout, const Triangle, + const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Hpr(const Layout, const Triangle, + const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); + +// Hermitian rank-2 matrix update: CHER2/ZHER2 +template +StatusCode Her2(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xher2(queue_cpp, event); + routine.DoHer2(layout, triangle, + n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc, + Buffer(a_buffer), a_offset, a_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Her2(const Layout, const Triangle, + const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Her2(const Layout, const Triangle, + const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Hermitian packed rank-2 matrix update: CHPR2/ZHPR2 +template +StatusCode Hpr2(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr ap_buffer, const size_t ap_offset, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xhpr2(queue_cpp, event); + routine.DoHpr2(layout, triangle, + n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc, + Buffer(ap_buffer), ap_offset); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Hpr2(const Layout, const Triangle, + const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Hpr2(const Layout, const Triangle, + const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); + +// Symmetric rank-1 matrix update: SSYR/DSYR/HSYR +template +StatusCode Syr(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xsyr(queue_cpp, event); + routine.DoSyr(layout, triangle, + n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(a_buffer), a_offset, a_ld); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Syr(const Layout, const Triangle, + const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syr(const Layout, const Triangle, + const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syr(const Layout, const Triangle, + const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Symmetric packed rank-1 matrix update: SSPR/DSPR/HSPR +template +StatusCode Spr(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + CUdeviceptr ap_buffer, const size_t ap_offset, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xspr(queue_cpp, event); + routine.DoSpr(layout, triangle, + n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(ap_buffer), ap_offset); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Spr(const Layout, const Triangle, + const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Spr(const Layout, const Triangle, + const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Spr(const Layout, const Triangle, + const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); + +// Symmetric rank-2 matrix update: SSYR2/DSYR2/HSYR2 +template +StatusCode Syr2(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xsyr2(queue_cpp, event); + routine.DoSyr2(layout, triangle, + n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc, + Buffer(a_buffer), a_offset, a_ld); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Syr2(const Layout, const Triangle, + const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syr2(const Layout, const Triangle, + const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syr2(const Layout, const Triangle, + const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Symmetric packed rank-2 matrix update: SSPR2/DSPR2/HSPR2 +template +StatusCode Spr2(const Layout layout, const Triangle triangle, + const size_t n, + const T alpha, + const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, + const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, + CUdeviceptr ap_buffer, const size_t ap_offset, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xspr2(queue_cpp, event); + routine.DoSpr2(layout, triangle, + n, + alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc, + Buffer(ap_buffer), ap_offset); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Spr2(const Layout, const Triangle, + const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Spr2(const Layout, const Triangle, + const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Spr2(const Layout, const Triangle, + const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, + CUstream*); + +// ================================================================================================= +// BLAS level-3 (matrix-matrix) routines +// ================================================================================================= + +// General matrix-matrix multiplication: SGEMM/DGEMM/CGEMM/ZGEMM/HGEMM +template +StatusCode Gemm(const Layout layout, const Transpose a_transpose, const Transpose b_transpose, + const size_t m, const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xgemm(queue_cpp, event); + routine.DoGemm(layout, a_transpose, b_transpose, + m, n, k, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld, + beta, + Buffer(c_buffer), c_offset, c_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Symmetric matrix-matrix multiplication: SSYMM/DSYMM/CSYMM/ZSYMM/HSYMM +template +StatusCode Symm(const Layout layout, const Side side, const Triangle triangle, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xsymm(queue_cpp, event); + routine.DoSymm(layout, side, triangle, + m, n, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld, + beta, + Buffer(c_buffer), c_offset, c_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, + const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Hermitian matrix-matrix multiplication: CHEMM/ZHEMM +template +StatusCode Hemm(const Layout layout, const Side side, const Triangle triangle, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xhemm(queue_cpp, event); + routine.DoHemm(layout, side, triangle, + m, n, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld, + beta, + Buffer(c_buffer), c_offset, c_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Hemm(const Layout, const Side, const Triangle, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Hemm(const Layout, const Side, const Triangle, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Rank-K update of a symmetric matrix: SSYRK/DSYRK/CSYRK/ZSYRK/HSYRK +template +StatusCode Syrk(const Layout layout, const Triangle triangle, const Transpose a_transpose, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xsyrk(queue_cpp, event); + routine.DoSyrk(layout, triangle, a_transpose, + n, k, + alpha, + Buffer(a_buffer), a_offset, a_ld, + beta, + Buffer(c_buffer), c_offset, c_ld); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Rank-K update of a hermitian matrix: CHERK/ZHERK +template +StatusCode Herk(const Layout layout, const Triangle triangle, const Transpose a_transpose, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xherk,T>(queue_cpp, event); + routine.DoHerk(layout, triangle, a_transpose, + n, k, + alpha, + Buffer>(a_buffer), a_offset, a_ld, + beta, + Buffer>(c_buffer), c_offset, c_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Herk(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Herk(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Rank-2K update of a symmetric matrix: SSYR2K/DSYR2K/CSYR2K/ZSYR2K/HSYR2K +template +StatusCode Syr2k(const Layout layout, const Triangle triangle, const Transpose ab_transpose, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const T beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xsyr2k(queue_cpp, event); + routine.DoSyr2k(layout, triangle, ab_transpose, + n, k, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld, + beta, + Buffer(c_buffer), c_offset, c_ld); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double2, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const half, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Rank-2K update of a hermitian matrix: CHER2K/ZHER2K +template +StatusCode Her2k(const Layout layout, const Triangle triangle, const Transpose ab_transpose, + const size_t n, const size_t k, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + const U beta, + CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xher2k(queue_cpp, event); + routine.DoHer2k(layout, triangle, ab_transpose, + n, k, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, 
b_ld, + beta, + Buffer(c_buffer), c_offset, c_ld); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Her2k(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const float, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Her2k(const Layout, const Triangle, const Transpose, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + const CUdeviceptr, const size_t, const size_t, + const double, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Triangular matrix-matrix multiplication: STRMM/DTRMM/CTRMM/ZTRMM/HTRMM +template +StatusCode Trmm(const Layout layout, const Side side, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xtrmm(queue_cpp, event); + routine.DoTrmm(layout, side, triangle, a_transpose, diagonal, + m, n, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Solves a triangular system of equations: STRSM/DTRSM/CTRSM/ZTRSM +template +StatusCode Trsm(const Layout layout, const Side side, const Triangle triangle, const Transpose a_transpose, const Diagonal diagonal, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xtrsm(queue_cpp, event); + routine.DoTrsm(layout, side, triangle, a_transpose, diagonal, + m, n, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Trsm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trsm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trsm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Trsm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// ================================================================================================= +// Extra non-BLAS routines (level-X) +// ================================================================================================= + +// Scaling and out-place transpose/copy (non-BLAS function): SOMATCOPY/DOMATCOPY/COMATCOPY/ZOMATCOPY/HOMATCOPY +template +StatusCode Omatcopy(const Layout layout, const Transpose a_transpose, + const size_t m, const size_t n, + const T alpha, + const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, + CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xomatcopy(queue_cpp, event); + routine.DoOmatcopy(layout, a_transpose, + m, n, + alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, + const size_t, const size_t, + const float, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, + const size_t, const size_t, + const double, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, + const size_t, const size_t, + const float2, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, + const size_t, const size_t, + const double2, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); +template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, + const size_t, const size_t, + const half, + const CUdeviceptr, const size_t, const size_t, + CUdeviceptr, const size_t, const size_t, + CUstream*); + +// Im2col function (non-BLAS function): SIM2COL/DIM2COL/CIM2COL/ZIM2COL/HIM2COL +template +StatusCode Im2col(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, + const CUdeviceptr im_buffer, const size_t im_offset, + CUdeviceptr col_buffer, const size_t col_offset, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xim2col(queue_cpp, event); + routine.DoIm2col(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, + Buffer(im_buffer), im_offset, + Buffer(col_buffer), col_offset); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); +template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + CUstream*); + +// Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED +template +StatusCode AxpyBatched(const size_t n, + const T *alphas, + const CUdeviceptr x_buffer, const size_t *x_offsets, const size_t x_inc, + CUdeviceptr y_buffer, const size_t *y_offsets, const size_t y_inc, + const size_t batch_count, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = XaxpyBatched(queue_cpp, event); + auto alphas_cpp = std::vector(); + auto x_offsets_cpp = std::vector(); + auto y_offsets_cpp = std::vector(); + for (auto batch = size_t{0}; batch < batch_count; ++batch) { + alphas_cpp.push_back(alphas[batch]); + x_offsets_cpp.push_back(x_offsets[batch]); + y_offsets_cpp.push_back(y_offsets[batch]); + } + routine.DoAxpyBatched(n, + alphas_cpp, + Buffer(x_buffer), x_offsets_cpp, x_inc, + Buffer(y_buffer), y_offsets_cpp, y_inc, + batch_count); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API AxpyBatched(const size_t, + const float*, + const CUdeviceptr, const size_t*, const size_t, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); +template StatusCode PUBLIC_API AxpyBatched(const size_t, + const double*, + const CUdeviceptr, const size_t*, const size_t, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); +template StatusCode PUBLIC_API AxpyBatched(const size_t, + const float2*, + const CUdeviceptr, const size_t*, const size_t, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); +template StatusCode PUBLIC_API AxpyBatched(const size_t, + const double2*, + const CUdeviceptr, const size_t*, const size_t, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); +template StatusCode PUBLIC_API AxpyBatched(const size_t, + const half*, + const CUdeviceptr, const size_t*, const size_t, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); + +// Batched version of GEMM: SGEMMBATCHED/DGEMMBATCHED/CGEMMBATCHED/ZGEMMBATCHED/HGEMMBATCHED +template +StatusCode GemmBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose, + const size_t m, const size_t n, const size_t k, + const T *alphas, + const CUdeviceptr a_buffer, const size_t *a_offsets, const size_t a_ld, + const CUdeviceptr b_buffer, const size_t *b_offsets, const size_t b_ld, + const T *betas, + CUdeviceptr c_buffer, const size_t *c_offsets, const size_t c_ld, + const size_t batch_count, + CUstream* stream) { + try { + auto queue_cpp = Queue(*queue); + auto routine = XgemmBatched(queue_cpp, event); + auto alphas_cpp = std::vector(); + auto betas_cpp = std::vector(); + auto a_offsets_cpp = std::vector(); + auto b_offsets_cpp = std::vector(); + auto c_offsets_cpp = std::vector(); + for (auto batch = size_t{0}; batch < batch_count; ++batch) { + alphas_cpp.push_back(alphas[batch]); + betas_cpp.push_back(betas[batch]); + a_offsets_cpp.push_back(a_offsets[batch]); + b_offsets_cpp.push_back(b_offsets[batch]); + c_offsets_cpp.push_back(c_offsets[batch]); + } + routine.DoGemmBatched(layout, a_transpose, b_transpose, + m, n, k, + alphas_cpp, + Buffer(a_buffer), a_offsets_cpp, a_ld, + Buffer(b_buffer), b_offsets_cpp, b_ld, + betas_cpp, + Buffer(c_buffer), c_offsets_cpp, c_ld, + batch_count); + return StatusCode::kSuccess; + } catch (...) 
{ return DispatchException(); } +} +template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const float*, + const CUdeviceptr, const size_t*, const size_t, + const CUdeviceptr, const size_t*, const size_t, + const float*, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); +template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const double*, + const CUdeviceptr, const size_t*, const size_t, + const CUdeviceptr, const size_t*, const size_t, + const double*, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); +template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const float2*, + const CUdeviceptr, const size_t*, const size_t, + const CUdeviceptr, const size_t*, const size_t, + const float2*, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); +template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const double2*, + const CUdeviceptr, const size_t*, const size_t, + const CUdeviceptr, const size_t*, const size_t, + const double2*, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); +template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const Transpose, + const size_t, const size_t, const size_t, + const half*, + const CUdeviceptr, const size_t*, const size_t, + const CUdeviceptr, const size_t*, const size_t, + const half*, + CUdeviceptr, const size_t*, const size_t, + const size_t, + CUstream*); + +// ================================================================================================= +} // namespace clblast diff --git a/src/cupp11.hpp b/src/cupp11.hpp new file mode 100644 index 00000000..988366ea --- /dev/null +++ b/src/cupp11.hpp @@ -0,0 +1,770 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file implements a bunch of C++11 classes that act as wrappers around OpenCL objects and API +// calls. The main benefits are increased abstraction, automatic memory management, and portability. +// Portability here means that a similar header exists for CUDA with the same classes and +// interfaces. In other words, moving from the OpenCL API to the CUDA API becomes a one-line change. +// +// This file is taken from the CLCudaAPI project and +// therefore contains the following header copyright notice: +// +// ================================================================================================= +// +// Copyright 2015 SURFsara +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ================================================================================================= + +#ifndef CLBLAST_CUPP11_H_ +#define CLBLAST_CUPP11_H_ + +// C++ +#include // std::copy +#include // std::string +#include // std::vector +#include // std::shared_ptr + +// CUDA +#include // CUDA driver API +#include // NVIDIA runtime compilation API + +// Exception classes +#include "cxpp11_common.hpp" + +namespace clblast { +// ================================================================================================= + +// Max-length of strings +constexpr auto kStringLength = 256; + +// ================================================================================================= + +// Represents a runtime error returned by a CUDA driver API function +class CLCudaAPIError : public ErrorCode { +public: + explicit CLCudaAPIError(CUresult status, const std::string &where): + ErrorCode(status, where, "CUDA error: " + where + ": " + + GetErrorName(status) + " --> " + GetErrorString(status)) { + } + + static void Check(const CUresult status, const std::string &where) { + if (status != CUDA_SUCCESS) { + throw CLCudaAPIError(status, where); + } + } + + static void CheckDtor(const CUresult status, const std::string &where) { + if (status != CUDA_SUCCESS) { + fprintf(stderr, "CLCudaAPI: %s (ignoring)\n", CLCudaAPIError(status, where).what()); + } + } + +private: + std::string GetErrorName(CUresult status) const { + const char* status_code; + cuGetErrorName(status, &status_code); + return std::string(status_code); + } + std::string GetErrorString(CUresult status) const { + const char* status_string; + cuGetErrorString(status, &status_string); + return std::string(status_string); + } +}; + +// Represents a runtime error returned by a CUDA runtime compilation API function +class CLCudaAPINVRTCError : public ErrorCode { +public: + explicit CLCudaAPINVRTCError(nvrtcResult status, const std::string &where): + ErrorCode(status, where, "CUDA NVRTC error: " + where + ": " + GetErrorString(status)) { + } + + static void Check(const nvrtcResult status, const std::string &where) { + if (status != NVRTC_SUCCESS) { + throw CLCudaAPINVRTCError(status, where); + } + } + + static void CheckDtor(const nvrtcResult status, const std::string &where) { + if (status != NVRTC_SUCCESS) { + fprintf(stderr, "CLCudaAPI: %s (ignoring)\n", CLCudaAPINVRTCError(status, where).what()); + } + } + +private: + std::string GetErrorString(nvrtcResult status) const { + const char* status_string = nvrtcGetErrorString(status); + return std::string(status_string); + } +}; + +// Exception returned when building a program +using CLCudaAPIBuildError = CLCudaAPINVRTCError; + +// ================================================================================================= + +// Error occurred in CUDA driver or runtime compilation API +#define CheckError(call) CLCudaAPIError::Check(call, CLCudaAPIError::TrimCallString(#call)) +#define CheckErrorNVRTC(call) CLCudaAPINVRTCError::Check(call, CLCudaAPINVRTCError::TrimCallString(#call)) + +// Error occurred in CUDA driver or runtime compilation API (no-exception version for destructors) +#define CheckErrorDtor(call) CLCudaAPIError::CheckDtor(call, CLCudaAPIError::TrimCallString(#call)) +#define CheckErrorDtorNVRTC(call) CLCudaAPINVRTCError::CheckDtor(call, CLCudaAPINVRTCError::TrimCallString(#call)) + +// 
================================================================================================= + +// C++11 version of two 'CUevent' pointers +class Event { +public: + // Note that there is no constructor based on the regular CUDA data-type because of extra state + + // Regular constructor with memory management + explicit Event(): + start_(new CUevent, [](CUevent* e) { CheckErrorDtor(cuEventDestroy(*e)); delete e; }), + end_(new CUevent, [](CUevent* e) { CheckErrorDtor(cuEventDestroy(*e)); delete e; }) { + CheckError(cuEventCreate(start_.get(), CU_EVENT_DEFAULT)); + CheckError(cuEventCreate(end_.get(), CU_EVENT_DEFAULT)); + } + + // Waits for completion of this event (not implemented for CUDA) + void WaitForCompletion() const { } + + // Retrieves the elapsed time of the last recorded event + float GetElapsedTime() const { + auto result = 0.0f; + cuEventElapsedTime(&result, *start_, *end_); + return result; + } + + // Accessors to the private data-members + const CUevent& start() const { return *start_; } + const CUevent& end() const { return *end_; } + Event* pointer() { return this; } +private: + std::shared_ptr start_; + std::shared_ptr end_; +}; + +// Pointer to a CUDA event +using EventPointer = Event*; + +// ================================================================================================= + +// Raw platform ID type +using RawPlatformID = size_t; + +// The CUDA platform: initializes the CUDA driver API +class Platform { +public: + + // Initializes the platform. Note that the platform ID variable is not actually used for CUDA. + explicit Platform(const size_t platform_id) : platform_id_(0) { + if (platform_id != 0) { throw LogicError("CUDA back-end requires a platform ID of 0"); } + CheckError(cuInit(0)); + } + + // Methods to retrieve platform information + std::string Name() const { return "CUDA"; } + std::string Vendor() const { return "NVIDIA Corporation"; } + std::string Version() const { + auto result = 0; + CheckError(cuDriverGetVersion(&result)); + return "CUDA driver "+std::to_string(result); + } + + // Returns the number of devices on this platform + size_t NumDevices() const { + auto result = 0; + CheckError(cuDeviceGetCount(&result)); + return static_cast(result); + } + + // Accessor to the raw ID (which doesn't exist in the CUDA back-end, this is always just 0) + const RawPlatformID& operator()() const { return platform_id_; } +private: + const size_t platform_id_; +}; + +// Retrieves a vector with all platforms. Note that there is just one platform in CUDA. 
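As a minimal usage sketch (assuming this header is available as "cupp11.hpp" and the program is linked against the CUDA driver library), the Platform and Device wrappers can be used to enumerate CUDA devices much like their OpenCL counterparts; the main() below and the printed fields are illustrative only, not something the patch itself adds:

// Sketch: enumerate CUDA devices through the wrappers in this header
#include <cstdio>
#include "cupp11.hpp"

int main() {
  const auto platform = clblast::Platform(size_t{0});  // calls cuInit(0)
  for (auto id = size_t{0}; id < platform.NumDevices(); ++id) {
    const auto device = clblast::Device(platform, id);
    std::printf("Device %zu: %s (%s, %zu compute units)\n", id,
                device.Name().c_str(), device.Capabilities().c_str(),
                device.ComputeUnits());
  }
  return 0;
}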
+inline std::vector GetAllPlatforms() { + auto all_platforms = std::vector{ Platform(size_t{0}) }; + return all_platforms; +} + +// ================================================================================================= + +// Raw device ID type +using RawDeviceID = CUdevice; + +// C++11 version of 'CUdevice' +class Device { +public: + + // Constructor based on the regular CUDA data-type + explicit Device(const CUdevice device): device_(device) { } + + // Initialization + explicit Device(const Platform &platform, const size_t device_id) { + auto num_devices = platform.NumDevices(); + if (num_devices == 0) { + throw RuntimeError("Device: no devices found"); + } + if (device_id >= num_devices) { + throw RuntimeError("Device: invalid device ID "+std::to_string(device_id)); + } + + CheckError(cuDeviceGet(&device_, device_id)); + } + + // Methods to retrieve device information + RawPlatformID PlatformID() const { return 0; } + std::string Version() const { + auto result = 0; + CheckError(cuDriverGetVersion(&result)); + return "CUDA driver "+std::to_string(result); + } + size_t VersionNumber() const { + auto result = 0; + CheckError(cuDriverGetVersion(&result)); + return static_cast(result); + } + std::string Vendor() const { return "NVIDIA Corporation"; } + std::string Name() const { + auto result = std::string{}; + result.resize(kStringLength); + CheckError(cuDeviceGetName(&result[0], result.size(), device_)); + return result; + } + std::string Type() const { return "GPU"; } + size_t MaxWorkGroupSize() const {return GetInfo(CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK); } + size_t MaxWorkItemDimensions() const { return size_t{3}; } + std::vector MaxWorkItemSizes() const { + return std::vector{GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X), + GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y), + GetInfo(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z)}; + } + unsigned long LocalMemSize() const { + return static_cast(GetInfo(CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK)); + } + + std::string Capabilities() const { + const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR); + const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR); + return "SM"+std::to_string(major)+"."+std::to_string(minor); + } + bool HasExtension(const std::string &extension) const { return false; } + bool SupportsFP64() const { return true; } + bool SupportsFP16() const { + const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR); + const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR); + if (major > 5) { return true; } // SM 6.x, 7.x and higher + if (major == 5 && minor == 3) { return true; } // SM 5.3 + return false; + } + + size_t CoreClock() const { return 1e-3*GetInfo(CU_DEVICE_ATTRIBUTE_CLOCK_RATE); } + size_t ComputeUnits() const { return GetInfo(CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT); } + unsigned long MemorySize() const { + auto result = size_t{0}; + CheckError(cuDeviceTotalMem(&result, device_)); + return static_cast(result); + } + unsigned long MaxAllocSize() const { return MemorySize(); } + size_t MemoryClock() const { return 1e-3*GetInfo(CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE); } + size_t MemoryBusWidth() const { return GetInfo(CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH); } + + // Configuration-validity checks + bool IsLocalMemoryValid(const size_t local_mem_usage) const { + return (local_mem_usage <= LocalMemSize()); + } + bool IsThreadConfigValid(const std::vector &local) const { + auto local_size = size_t{1}; + for (const auto &item: local) { 
local_size *= item; } + for (auto i=size_t{0}; i MaxWorkItemSizes()[i]) { return false; } + } + if (local_size > MaxWorkGroupSize()) { return false; } + if (local.size() > MaxWorkItemDimensions()) { return false; } + return true; + } + + // Query for a specific type of device or brand + bool IsCPU() const { return false; } + bool IsGPU() const { return true; } + bool IsAMD() const { return false; } + bool IsNVIDIA() const { return true; } + bool IsIntel() const { return false; } + bool IsARM() const { return false; } + + // Platform specific extensions + std::string AMDBoardName() const { return ""; } + std::string NVIDIAComputeCapability() const { return Capabilities(); } + + // Accessor to the private data-member + const RawDeviceID& operator()() const { return device_; } +private: + CUdevice device_; + + // Private helper function + size_t GetInfo(const CUdevice_attribute info) const { + auto result = 0; + CheckError(cuDeviceGetAttribute(&result, info, device_)); + return static_cast(result); + } +}; + +// ================================================================================================= + +// Raw context type +using RawContext = CUcontext; + +// C++11 version of 'CUcontext' +class Context { +public: + + // Constructor based on the regular CUDA data-type: memory management is handled elsewhere + explicit Context(const CUcontext context): + context_(new CUcontext) { + *context_ = context; + } + + // Regular constructor with memory management + explicit Context(const Device &device): + context_(new CUcontext, [](CUcontext* c) { + if (*c) { CheckErrorDtor(cuCtxDestroy(*c)); } + delete c; + }) { + CheckError(cuCtxCreate(context_.get(), 0, device())); + } + + // Accessor to the private data-member + const RawContext& operator()() const { return *context_; } + RawContext* pointer() const { return &(*context_); } +private: + std::shared_ptr context_; +}; + +// Pointer to a raw CUDA context +using ContextPointer = CUcontext*; + +// ================================================================================================= + +// C++11 version of 'nvrtcProgram'. Additionally holds the program's source code. 
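As an illustration of how the nvrtcProgram wrapper below might be used, the following sketch compiles a trivial kernel and retrieves its PTX; the kernel source, the CompileTrivialKernel helper, and the --gpu-architecture option are assumptions made for this example only:

// Sketch: compile a trivial kernel via the Program wrapper and return its PTX
#include <string>
#include <vector>
#include "cupp11.hpp"

std::string CompileTrivialKernel(const clblast::Device &device,
                                 const clblast::Context &context) {
  const auto source = std::string{
      "extern \"C\" __global__ void copy(const float* x, float* y) {\n"
      "  y[threadIdx.x] = x[threadIdx.x];\n"
      "}\n"};
  auto program = clblast::Program(context, source);
  auto options = std::vector<std::string>{"--gpu-architecture=compute_50"};
  program.Build(device, options);  // runs nvrtcCompileProgram on the stored source
  return program.GetIR();          // the generated PTX as a string
}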
+class Program { +public: + // Note that there is no constructor based on the regular CUDA data-type because of extra state + + // Source-based constructor with memory management + explicit Program(const Context &, std::string source): + program_(new nvrtcProgram, [](nvrtcProgram* p) { + if (*p) { CheckErrorDtorNVRTC(nvrtcDestroyProgram(p)); } + delete p; + }), + source_(std::move(source)), + from_binary_(false) { + const auto source_ptr = &source_[0]; + CheckErrorNVRTC(nvrtcCreateProgram(program_.get(), source_ptr, nullptr, 0, nullptr, nullptr)); + } + + // PTX-based constructor + explicit Program(const Device &device, const Context &context, const std::string &binary): + program_(nullptr), // not used + source_(binary), + from_binary_(true) { + } + + // Compiles the device program and checks whether or not there are any warnings/errors + void Build(const Device &, std::vector &options) { + if (from_binary_) { return; } + auto raw_options = std::vector(); + for (const auto &option: options) { + raw_options.push_back(option.c_str()); + } + auto status = nvrtcCompileProgram(*program_, raw_options.size(), raw_options.data()); + CLCudaAPINVRTCError::Check(status, "nvrtcCompileProgram"); + } + + // Confirms whether a certain status code is an actual compilation error or warning + bool StatusIsCompilationWarningOrError(const nvrtcResult status) const { + return (status == NVRTC_ERROR_INVALID_INPUT); + } + + // Retrieves the warning/error message from the compiler (if any) + std::string GetBuildInfo(const Device &) const { + if (from_binary_) { return std::string{}; } + auto bytes = size_t{0}; + CheckErrorNVRTC(nvrtcGetProgramLogSize(*program_, &bytes)); + auto result = std::string{}; + result.resize(bytes); + CheckErrorNVRTC(nvrtcGetProgramLog(*program_, &result[0])); + return result; + } + + // Retrieves an intermediate representation of the compiled program (i.e. 
PTX) + std::string GetIR() const { + if (from_binary_) { return source_; } // holds the PTX + auto bytes = size_t{0}; + CheckErrorNVRTC(nvrtcGetPTXSize(*program_, &bytes)); + auto result = std::string{}; + result.resize(bytes); + CheckErrorNVRTC(nvrtcGetPTX(*program_, &result[0])); + return result; + } + + // Accessor to the private data-member + const nvrtcProgram& operator()() const { return *program_; } +private: + std::shared_ptr program_; + const std::string source_; + const bool from_binary_; +}; + +// ================================================================================================= + +// Raw command-queue type +using RawCommandQueue = CUstream; + +// C++11 version of 'CUstream' +class Queue { +public: + // Note that there is no constructor based on the regular CUDA data-type because of extra state + + // Regular constructor with memory management + explicit Queue(const Context &context, const Device &device): + queue_(new CUstream, [](CUstream* s) { + if (*s) { CheckErrorDtor(cuStreamDestroy(*s)); } + delete s; + }), + context_(context), + device_(device) { + CheckError(cuStreamCreate(queue_.get(), CU_STREAM_NON_BLOCKING)); + } + + // Synchronizes the queue and optionally also an event + void Finish(Event &event) const { + CheckError(cuEventSynchronize(event.end())); + Finish(); + } + void Finish() const { + CheckError(cuStreamSynchronize(*queue_)); + } + + // Retrieves the corresponding context or device + Context GetContext() const { return context_; } + Device GetDevice() const { return device_; } + + // Accessor to the private data-member + const RawCommandQueue& operator()() const { return *queue_; } +private: + std::shared_ptr queue_; + const Context context_; + const Device device_; +}; + +// ================================================================================================= + +// C++11 version of page-locked host memory +template +class BufferHost { +public: + + // Regular constructor with memory management + explicit BufferHost(const Context &, const size_t size): + buffer_(new void*, [](void** m) { CheckError(cuMemFreeHost(*m)); delete m; }), + size_(size) { + CheckError(cuMemAllocHost(buffer_.get(), size*sizeof(T))); + } + + // Retrieves the actual allocated size in bytes + size_t GetSize() const { + return size_*sizeof(T); + } + + // Compatibility with std::vector + size_t size() const { return size_; } + T* begin() { return &static_cast(*buffer_)[0]; } + T* end() { return &static_cast(*buffer_)[size_-1]; } + T& operator[](const size_t i) { return static_cast(*buffer_)[i]; } + T* data() { return static_cast(*buffer_); } + const T* data() const { return static_cast(*buffer_); } + +private: + std::shared_ptr buffer_; + const size_t size_; +}; + +// ================================================================================================= + +// Enumeration of buffer access types +enum class BufferAccess { kReadOnly, kWriteOnly, kReadWrite, kNotOwned }; + +// C++11 version of 'CUdeviceptr' +template +class Buffer { +public: + + // Constructor based on the regular CUDA data-type: memory management is handled elsewhere + explicit Buffer(const CUdeviceptr buffer): + buffer_(new CUdeviceptr), + access_(BufferAccess::kNotOwned) { + *buffer_ = buffer; + } + + // Regular constructor with memory management. If this class does not own the buffer object, then + // the memory will not be freed automatically afterwards. 
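As an illustration of how this Buffer class might be used together with the Queue wrapper above, the sketch below round-trips a block of data through device memory; the RoundTrip helper and the element count are assumptions made for this example only:

// Sketch: round-trip data through a device buffer
#include <vector>
#include "cupp11.hpp"

void RoundTrip(const clblast::Context &context, const clblast::Queue &queue) {
  const auto n = size_t{1024};
  const auto host_in = std::vector<float>(n, 1.0f);
  auto host_out = std::vector<float>(n, 0.0f);
  auto device_buffer = clblast::Buffer<float>(context, n);  // read-write access by default
  device_buffer.Write(queue, n, host_in);   // host -> device, synchronised via Finish()
  device_buffer.Read(queue, n, host_out);   // device -> host; host_out now holds the data
}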
+ explicit Buffer(const Context &, const BufferAccess access, const size_t size): + buffer_(new CUdeviceptr, [access](CUdeviceptr* m) { + if (access != BufferAccess::kNotOwned) { CheckError(cuMemFree(*m)); } + delete m; + }), + access_(access) { + CheckError(cuMemAlloc(buffer_.get(), size*sizeof(T))); + } + + // As above, but now with read/write access as a default + explicit Buffer(const Context &context, const size_t size): + Buffer(context, BufferAccess::kReadWrite, size) { + } + + // Constructs a new buffer based on an existing host-container + template + explicit Buffer(const Context &context, const Queue &queue, Iterator start, Iterator end): + Buffer(context, BufferAccess::kReadWrite, static_cast(end - start)) { + auto size = static_cast(end - start); + auto pointer = &*start; + CheckError(cuMemcpyHtoDAsync(*buffer_, pointer, size*sizeof(T), queue())); + queue.Finish(); + } + + // Copies from device to host: reading the device buffer a-synchronously + void ReadAsync(const Queue &queue, const size_t size, T* host, const size_t offset = 0) const { + if (access_ == BufferAccess::kWriteOnly) { + throw LogicError("Buffer: reading from a write-only buffer"); + } + CheckError(cuMemcpyDtoHAsync(host, *buffer_ + offset*sizeof(T), size*sizeof(T), queue())); + } + void ReadAsync(const Queue &queue, const size_t size, std::vector &host, + const size_t offset = 0) const { + if (host.size() < size) { + throw LogicError("Buffer: target host buffer is too small"); + } + ReadAsync(queue, size, host.data(), offset); + } + void ReadAsync(const Queue &queue, const size_t size, BufferHost &host, + const size_t offset = 0) const { + if (host.size() < size) { + throw LogicError("Buffer: target host buffer is too small"); + } + ReadAsync(queue, size, host.data(), offset); + } + + // Copies from device to host: reading the device buffer + void Read(const Queue &queue, const size_t size, T* host, const size_t offset = 0) const { + ReadAsync(queue, size, host, offset); + queue.Finish(); + } + void Read(const Queue &queue, const size_t size, std::vector &host, + const size_t offset = 0) const { + Read(queue, size, host.data(), offset); + } + void Read(const Queue &queue, const size_t size, BufferHost &host, + const size_t offset = 0) const { + Read(queue, size, host.data(), offset); + } + + // Copies from host to device: writing the device buffer a-synchronously + void WriteAsync(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) { + if (access_ == BufferAccess::kReadOnly) { + throw LogicError("Buffer: writing to a read-only buffer"); + } + if (GetSize() < (offset+size)*sizeof(T)) { + throw LogicError("Buffer: target device buffer is too small"); + } + CheckError(cuMemcpyHtoDAsync(*buffer_ + offset*sizeof(T), host, size*sizeof(T), queue())); + } + void WriteAsync(const Queue &queue, const size_t size, const std::vector &host, + const size_t offset = 0) { + WriteAsync(queue, size, host.data(), offset); + } + void WriteAsync(const Queue &queue, const size_t size, const BufferHost &host, + const size_t offset = 0) { + WriteAsync(queue, size, host.data(), offset); + } + + // Copies from host to device: writing the device buffer + void Write(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) { + WriteAsync(queue, size, host, offset); + queue.Finish(); + } + void Write(const Queue &queue, const size_t size, const std::vector &host, + const size_t offset = 0) { + Write(queue, size, host.data(), offset); + } + void Write(const Queue &queue, const size_t size, 
const BufferHost &host, + const size_t offset = 0) { + Write(queue, size, host.data(), offset); + } + + // Copies the contents of this buffer into another device buffer + void CopyToAsync(const Queue &queue, const size_t size, const Buffer &destination) const { + CheckError(cuMemcpyDtoDAsync(destination(), *buffer_, size*sizeof(T), queue())); + } + void CopyTo(const Queue &queue, const size_t size, const Buffer &destination) const { + CopyToAsync(queue, size, destination); + queue.Finish(); + } + + // Retrieves the actual allocated size in bytes + size_t GetSize() const { + auto result = size_t{0}; + CheckError(cuMemGetAddressRange(nullptr, &result, *buffer_)); + return result; + } + + // Accessors to the private data-members + CUdeviceptr operator()() const { return *buffer_; } + CUdeviceptr& operator()() { return *buffer_; } +private: + std::shared_ptr buffer_; + const BufferAccess access_; +}; + +// ================================================================================================= + +// C++11 version of 'CUfunction' +class Kernel { +public: + + // Constructor based on the regular CUDA data-type: memory management is handled elsewhere + explicit Kernel(const CUmodule module, const CUfunction kernel): + module_(module), + kernel_(kernel) { + } + + // Regular constructor with memory management + explicit Kernel(const Program &program, const std::string &name) { + CheckError(cuModuleLoadDataEx(&module_, program.GetIR().data(), 0, nullptr, nullptr)); + CheckError(cuModuleGetFunction(&kernel_, module_, name.c_str())); + } + + // Sets a kernel argument at the indicated position. This stores both the value of the argument + // (as raw bytes) and the index indicating where this value can be found. + template + void SetArgument(const size_t index, const T &value) { + if (index >= arguments_indices_.size()) { arguments_indices_.resize(index+1); } + arguments_indices_[index] = arguments_data_.size(); + for (auto j=size_t(0); j<sizeof(T); ++j) { + arguments_data_.push_back(reinterpret_cast<const char*>(&value)[j]); + } + } + template + void SetArgument(const size_t index, Buffer &value) { + SetArgument(index, value()); + } + + // Sets all arguments in one go using parameter packs. Note that this resets all previously set + // arguments using 'SetArgument' or 'SetArguments'. + template + void SetArguments(Args&... args) { + arguments_indices_.clear(); + arguments_data_.clear(); + SetArgumentsRecursive(0, args...); + } + + // Retrieves the amount of local memory used per work-group for this kernel. Note that this is the + // shared memory in CUDA terminology.
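// A minimal sketch of how arguments might be marshalled and a kernel launched with the wrappers
// above, assuming a compiled Program 'program' containing an entry point named "axpy" and two
// existing device buffers (the function and kernel names are hypothetical): SetArguments stores
// each value as raw bytes, and Launch below hands them to cuLaunchKernel as an array of pointers.
void ExampleKernelLaunch(const Program &program, const Queue &queue, EventPointer event,
                         Buffer<float> &x_buffer, Buffer<float> &y_buffer,
                         const float alpha, const size_t n) {
  auto kernel = Kernel(program, "axpy");
  kernel.SetArguments(x_buffer, y_buffer, alpha, n);   // resets and stores all arguments in one go
  const auto local = std::vector<size_t>{64};          // threads per block
  const auto global = std::vector<size_t>{((n + 63) / 64) * 64};  // total threads, a multiple of 'local'
  kernel.Launch(queue, global, local, event);          // records start/end events around the launch
}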
+ unsigned long LocalMemUsage(const Device &) const { + auto result = 0; + CheckError(cuFuncGetAttribute(&result, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel_)); + return static_cast(result); + } + + // Retrieves the name of the kernel + std::string GetFunctionName() const { + return std::string{"unknown"}; // Not implemented for the CUDA backend + } + + // Launches a kernel onto the specified queue + void Launch(const Queue &queue, const std::vector &global, + const std::vector &local, EventPointer event) { + + // Creates the grid (number of threadblocks) and sets the block sizes (threads per block) + auto grid = std::vector{1, 1, 1}; + auto block = std::vector{1, 1, 1}; + if (global.size() != local.size()) { throw LogicError("invalid thread/workgroup dimensions"); } + for (auto i=size_t{0}; i<local.size(); ++i) { grid[i] = global[i]/local[i]; block[i] = local[i]; } + + // Collects a pointer to each argument, in the order of their indices + std::vector<void*> pointers; + for (auto &index: arguments_indices_) { + pointers.push_back(&arguments_data_[index]); + } + + // Launches the kernel, its execution time is recorded by events + CheckError(cuEventRecord(event->start(), queue())); + CheckError(cuLaunchKernel(kernel_, grid[0], grid[1], grid[2], block[0], block[1], block[2], + 0, queue(), pointers.data(), nullptr)); + CheckError(cuEventRecord(event->end(), queue())); + } + + // As above, but with an event waiting list + // TODO: Implement this function + void Launch(const Queue &queue, const std::vector &global, + const std::vector &local, EventPointer event, + std::vector& waitForEvents) { + if (local.size() == 0) { + throw LogicError("Kernel: launching with a default workgroup size is not implemented for the CUDA back-end"); + } + else if (waitForEvents.size() != 0) { + throw LogicError("Kernel: launching with an event waiting list is not implemented for the CUDA back-end"); + } + else { + return Launch(queue, global, local, event); + } + } + + // Accessors to the private data-members + const CUfunction& operator()() const { return kernel_; } + CUfunction operator()() { return kernel_; } +private: + CUmodule module_; + CUfunction kernel_; + std::vector arguments_indices_; // Indices of the arguments + std::vector arguments_data_; // The arguments data as raw bytes + + // Internal implementation for the recursive SetArguments function. + template + void SetArgumentsRecursive(const size_t index, T &first) { + SetArgument(index, first); + } + template + void SetArgumentsRecursive(const size_t index, T &first, Args&...
args) { + SetArgument(index, first); + SetArgumentsRecursive(index+1, args...); + } +}; + +// ================================================================================================= +} // namespace clblast + +// CLBLAST_CUPP11_H_ +#endif diff --git a/src/utilities/buffer_test.hpp b/src/utilities/buffer_test.hpp index b5693181..a5b6be4b 100644 --- a/src/utilities/buffer_test.hpp +++ b/src/utilities/buffer_test.hpp @@ -15,7 +15,7 @@ #ifndef CLBLAST_BUFFER_TEST_H_ #define CLBLAST_BUFFER_TEST_H_ -#include "clblast.h" +#include "utilities/utilities.hpp" namespace clblast { // ================================================================================================= diff --git a/src/utilities/utilities.hpp b/src/utilities/utilities.hpp index b2949c27..f56226be 100644 --- a/src/utilities/utilities.hpp +++ b/src/utilities/utilities.hpp @@ -21,8 +21,13 @@ #include #include -#include "clpp11.hpp" -#include "clblast.h" +#ifdef OPENCL_API + #include "clpp11.hpp" + #include "clblast.h" +#elif CUDA_API + #include "cupp11.hpp" + #include "clblast_cuda.h" +#endif #include "clblast_half.h" #include "utilities/clblast_exceptions.hpp" #include "utilities/msvc.hpp" -- cgit v1.2.3 From cc5b4754250b3c03b9b0f8d72f32d1eacac15b18 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Thu, 12 Oct 2017 12:20:43 +0200 Subject: CUDA API now takes context and device instead of a stream --- include/clblast_cuda.h | 112 ++--- scripts/generator/generator/cpp.py | 9 +- scripts/generator/generator/routine.py | 4 +- src/clblast_cuda.cpp | 720 +++++++++++++++++++-------------- src/utilities/buffer_test.hpp | 2 +- 5 files changed, 476 insertions(+), 371 deletions(-) diff --git a/include/clblast_cuda.h b/include/clblast_cuda.h index c125c302..e28f68e5 100644 --- a/include/clblast_cuda.h +++ b/include/clblast_cuda.h @@ -103,7 +103,7 @@ StatusCode Rotg(CUdeviceptr sa_buffer, const size_t sa_offset, CUdeviceptr sb_buffer, const size_t sb_offset, CUdeviceptr sc_buffer, const size_t sc_offset, CUdeviceptr ss_buffer, const size_t ss_offset, - CUstream* stream); + const CUcontext context, const CUdevice device); // Generate modified givens plane rotation: SROTMG/DROTMG template @@ -112,7 +112,7 @@ StatusCode Rotmg(CUdeviceptr sd1_buffer, const size_t sd1_offset, CUdeviceptr sx1_buffer, const size_t sx1_offset, const CUdeviceptr sy1_buffer, const size_t sy1_offset, CUdeviceptr sparam_buffer, const size_t sparam_offset, - CUstream* stream); + const CUcontext context, const CUdevice device); // Apply givens plane rotation: SROT/DROT template @@ -121,7 +121,7 @@ StatusCode Rot(const size_t n, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, const T cos, const T sin, - CUstream* stream); + const CUcontext context, const CUdevice device); // Apply modified givens plane rotation: SROTM/DROTM template @@ -129,28 +129,28 @@ StatusCode Rotm(const size_t n, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr sparam_buffer, const size_t sparam_offset, - CUstream* stream); + const CUcontext context, const CUdevice device); // Swap two vectors: SSWAP/DSWAP/CSWAP/ZSWAP/HSWAP template StatusCode Swap(const size_t n, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Vector scaling: SSCAL/DSCAL/CSCAL/ZSCAL/HSCAL template StatusCode Scal(const size_t n, const T alpha, CUdeviceptr
x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Vector copy: SCOPY/DCOPY/CCOPY/ZCOPY/HCOPY template StatusCode Copy(const size_t n, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Vector-times-constant plus vector: SAXPY/DAXPY/CAXPY/ZAXPY/HAXPY template @@ -158,7 +158,7 @@ StatusCode Axpy(const size_t n, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Dot product of two vectors: SDOT/DDOT/HDOT template @@ -166,7 +166,7 @@ StatusCode Dot(const size_t n, CUdeviceptr dot_buffer, const size_t dot_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Dot product of two complex vectors: CDOTU/ZDOTU template @@ -174,7 +174,7 @@ StatusCode Dotu(const size_t n, CUdeviceptr dot_buffer, const size_t dot_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Dot product of two complex vectors, one conjugated: CDOTC/ZDOTC template @@ -182,56 +182,56 @@ StatusCode Dotc(const size_t n, CUdeviceptr dot_buffer, const size_t dot_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Euclidian norm of a vector: SNRM2/DNRM2/ScNRM2/DzNRM2/HNRM2 template StatusCode Nrm2(const size_t n, CUdeviceptr nrm2_buffer, const size_t nrm2_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Absolute sum of values in a vector: SASUM/DASUM/ScASUM/DzASUM/HASUM template StatusCode Asum(const size_t n, CUdeviceptr asum_buffer, const size_t asum_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Sum of values in a vector (non-BLAS function): SSUM/DSUM/ScSUM/DzSUM/HSUM template StatusCode Sum(const size_t n, CUdeviceptr sum_buffer, const size_t sum_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Index of absolute maximum value in a vector: iSAMAX/iDAMAX/iCAMAX/iZAMAX/iHAMAX template StatusCode Amax(const size_t n, CUdeviceptr imax_buffer, const size_t imax_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Index of absolute minimum value in a vector (non-BLAS function): iSAMIN/iDAMIN/iCAMIN/iZAMIN/iHAMIN template StatusCode Amin(const size_t n, CUdeviceptr imin_buffer, const size_t imin_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Index of maximum value in a vector (non-BLAS function): 
iSMAX/iDMAX/iCMAX/iZMAX/iHMAX template StatusCode Max(const size_t n, CUdeviceptr imax_buffer, const size_t imax_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Index of minimum value in a vector (non-BLAS function): iSMIN/iDMIN/iCMIN/iZMIN/iHMIN template StatusCode Min(const size_t n, CUdeviceptr imin_buffer, const size_t imin_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // ================================================================================================= // BLAS level-2 (matrix-vector) routines @@ -246,7 +246,7 @@ StatusCode Gemv(const Layout layout, const Transpose a_transpose, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // General banded matrix-vector multiplication: SGBMV/DGBMV/CGBMV/ZGBMV/HGBMV template @@ -257,7 +257,7 @@ StatusCode Gbmv(const Layout layout, const Transpose a_transpose, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Hermitian matrix-vector multiplication: CHEMV/ZHEMV template @@ -268,7 +268,7 @@ StatusCode Hemv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Hermitian banded matrix-vector multiplication: CHBMV/ZHBMV template @@ -279,7 +279,7 @@ StatusCode Hbmv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Hermitian packed matrix-vector multiplication: CHPMV/ZHPMV template @@ -290,7 +290,7 @@ StatusCode Hpmv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Symmetric matrix-vector multiplication: SSYMV/DSYMV/HSYMV template @@ -301,7 +301,7 @@ StatusCode Symv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Symmetric banded matrix-vector multiplication: SSBMV/DSBMV/HSBMV template @@ -312,7 +312,7 @@ StatusCode Sbmv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Symmetric packed matrix-vector multiplication: SSPMV/DSPMV/HSPMV template @@ -323,7 +323,7 @@ StatusCode Spmv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, 
CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Triangular matrix-vector multiplication: STRMV/DTRMV/CTRMV/ZTRMV/HTRMV template @@ -331,7 +331,7 @@ StatusCode Trmv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Triangular banded matrix-vector multiplication: STBMV/DTBMV/CTBMV/ZTBMV/HTBMV template @@ -339,7 +339,7 @@ StatusCode Tbmv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const size_t k, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Triangular packed matrix-vector multiplication: STPMV/DTPMV/CTPMV/ZTPMV/HTPMV template @@ -347,7 +347,7 @@ StatusCode Tpmv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const CUdeviceptr ap_buffer, const size_t ap_offset, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Solves a triangular system of equations: STRSV/DTRSV/CTRSV/ZTRSV template @@ -355,7 +355,7 @@ StatusCode Trsv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Solves a banded triangular system of equations: STBSV/DTBSV/CTBSV/ZTBSV template @@ -363,7 +363,7 @@ StatusCode Tbsv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const size_t k, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // Solves a packed triangular system of equations: STPSV/DTPSV/CTPSV/ZTPSV template @@ -371,7 +371,7 @@ StatusCode Tpsv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const CUdeviceptr ap_buffer, const size_t ap_offset, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream); + const CUcontext context, const CUdevice device); // General rank-1 matrix update: SGER/DGER/HGER template @@ -381,7 +381,7 @@ StatusCode Ger(const Layout layout, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // General rank-1 complex matrix update: CGERU/ZGERU template @@ -391,7 +391,7 @@ StatusCode Geru(const Layout layout, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // General rank-1 complex conjugated matrix update: CGERC/ZGERC template @@ -401,7 +401,7 @@ StatusCode Gerc(const Layout layout, const CUdeviceptr x_buffer, const size_t 
x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Hermitian rank-1 matrix update: CHER/ZHER template @@ -410,7 +410,7 @@ StatusCode Her(const Layout layout, const Triangle triangle, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Hermitian packed rank-1 matrix update: CHPR/ZHPR template @@ -419,7 +419,7 @@ StatusCode Hpr(const Layout layout, const Triangle triangle, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr ap_buffer, const size_t ap_offset, - CUstream* stream); + const CUcontext context, const CUdevice device); // Hermitian rank-2 matrix update: CHER2/ZHER2 template @@ -429,7 +429,7 @@ StatusCode Her2(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Hermitian packed rank-2 matrix update: CHPR2/ZHPR2 template @@ -439,7 +439,7 @@ StatusCode Hpr2(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr ap_buffer, const size_t ap_offset, - CUstream* stream); + const CUcontext context, const CUdevice device); // Symmetric rank-1 matrix update: SSYR/DSYR/HSYR template @@ -448,7 +448,7 @@ StatusCode Syr(const Layout layout, const Triangle triangle, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Symmetric packed rank-1 matrix update: SSPR/DSPR/HSPR template @@ -457,7 +457,7 @@ StatusCode Spr(const Layout layout, const Triangle triangle, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr ap_buffer, const size_t ap_offset, - CUstream* stream); + const CUcontext context, const CUdevice device); // Symmetric rank-2 matrix update: SSYR2/DSYR2/HSYR2 template @@ -467,7 +467,7 @@ StatusCode Syr2(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Symmetric packed rank-2 matrix update: SSPR2/DSPR2/HSPR2 template @@ -477,7 +477,7 @@ StatusCode Spr2(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr ap_buffer, const size_t ap_offset, - CUstream* stream); + const CUcontext context, const CUdevice device); // ================================================================================================= // BLAS level-3 (matrix-matrix) routines @@ -492,7 +492,7 @@ StatusCode Gemm(const Layout layout, const Transpose a_transpose, const Transpos 
const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Symmetric matrix-matrix multiplication: SSYMM/DSYMM/CSYMM/ZSYMM/HSYMM template @@ -503,7 +503,7 @@ StatusCode Symm(const Layout layout, const Side side, const Triangle triangle, const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Hermitian matrix-matrix multiplication: CHEMM/ZHEMM template @@ -514,7 +514,7 @@ StatusCode Hemm(const Layout layout, const Side side, const Triangle triangle, const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Rank-K update of a symmetric matrix: SSYRK/DSYRK/CSYRK/ZSYRK/HSYRK template @@ -524,7 +524,7 @@ StatusCode Syrk(const Layout layout, const Triangle triangle, const Transpose a_ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Rank-K update of a hermitian matrix: CHERK/ZHERK template @@ -534,7 +534,7 @@ StatusCode Herk(const Layout layout, const Triangle triangle, const Transpose a_ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Rank-2K update of a symmetric matrix: SSYR2K/DSYR2K/CSYR2K/ZSYR2K/HSYR2K template @@ -545,7 +545,7 @@ StatusCode Syr2k(const Layout layout, const Triangle triangle, const Transpose a const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Rank-2K update of a hermitian matrix: CHER2K/ZHER2K template @@ -556,7 +556,7 @@ StatusCode Her2k(const Layout layout, const Triangle triangle, const Transpose a const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const U beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Triangular matrix-matrix multiplication: STRMM/DTRMM/CTRMM/ZTRMM/HTRMM template @@ -565,7 +565,7 @@ StatusCode Trmm(const Layout layout, const Side side, const Triangle triangle, c const T alpha, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Solves a triangular system of equations: STRSM/DTRSM/CTRSM/ZTRSM template @@ -574,7 +574,7 @@ StatusCode Trsm(const Layout layout, const Side side, const Triangle triangle, c const T alpha, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // ================================================================================================= // Extra non-BLAS routines (level-X) @@ -587,14 +587,14 @@ StatusCode 
Omatcopy(const Layout layout, const Transpose a_transpose, const T alpha, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, - CUstream* stream); + const CUcontext context, const CUdevice device); // Im2col function (non-BLAS function): SIM2COL/DIM2COL/CIM2COL/ZIM2COL/HIM2COL template StatusCode Im2col(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const CUdeviceptr im_buffer, const size_t im_offset, CUdeviceptr col_buffer, const size_t col_offset, - CUstream* stream); + const CUcontext context, const CUdevice device); // Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED template @@ -603,7 +603,7 @@ StatusCode AxpyBatched(const size_t n, const CUdeviceptr x_buffer, const size_t *x_offsets, const size_t x_inc, CUdeviceptr y_buffer, const size_t *y_offsets, const size_t y_inc, const size_t batch_count, - CUstream* stream); + const CUcontext context, const CUdevice device); // Batched version of GEMM: SGEMMBATCHED/DGEMMBATCHED/CGEMMBATCHED/ZGEMMBATCHED/HGEMMBATCHED template @@ -615,7 +615,7 @@ StatusCode GemmBatched(const Layout layout, const Transpose a_transpose, const T const T *betas, CUdeviceptr c_buffer, const size_t *c_offsets, const size_t c_ld, const size_t batch_count, - CUstream* stream); + const CUcontext context, const CUdevice device); // ================================================================================================= diff --git a/scripts/generator/generator/cpp.py b/scripts/generator/generator/cpp.py index f1ee1959..5413906a 100644 --- a/scripts/generator/generator/cpp.py +++ b/scripts/generator/generator/cpp.py @@ -50,7 +50,12 @@ def clblast_cc(routine, cuda=False): if routine.implemented: result += routine.routine_header_cpp(12, "", cuda) + " {" + NL result += " try {" + NL - result += " auto queue_cpp = Queue(*queue);" + NL + if cuda: + result += " const auto context_cpp = Context(context);" + NL + result += " const auto device_cpp = Device(device);" + NL + result += " auto queue_cpp = Queue(context_cpp, device_cpp);" + NL + else: + result += " auto queue_cpp = Queue(*queue);" + NL result += " auto routine = X" + routine.plain_name() + "<" + routine.template.template + ">(queue_cpp, event);" + NL if routine.batched: result += " " + (NL + " ").join(routine.batched_transform_to_cpp()) + NL @@ -72,7 +77,7 @@ def clblast_cc(routine, cuda=False): result += ("," + NL + indent2).join([a for a in arguments]) result += "," + NL + indent2 if cuda: - result += "CUstream*" + result += "const CUcontext, const CUdevice" else: result += "cl_command_queue*, cl_event*" result += ");" + NL diff --git a/scripts/generator/generator/routine.py b/scripts/generator/generator/routine.py index c3c1f775..b6b55821 100644 --- a/scripts/generator/generator/routine.py +++ b/scripts/generator/generator/routine.py @@ -813,7 +813,7 @@ class Routine: result += (",\n" + indent).join([a for a in arguments]) result += ",\n" + indent if cuda: - result += "CUstream* stream" + result += "const CUcontext context, const CUdevice device" else: result += "cl_command_queue* queue, cl_event* event" + default_event result += ")" @@ -830,7 +830,7 @@ class Routine: result += (",\n" + indent).join([a for a in arguments]) result += ",\n" + indent if cuda: - result += "CUstream* stream" + 
result += "const CUcontext, const CUdevice" else: result += "cl_command_queue*, cl_event*" result += ")" diff --git a/src/clblast_cuda.cpp b/src/clblast_cuda.cpp index 5f30d023..f9a24236 100644 --- a/src/clblast_cuda.cpp +++ b/src/clblast_cuda.cpp @@ -30,19 +30,19 @@ StatusCode Rotg(CUdeviceptr, const size_t, CUdeviceptr, const size_t, CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream* stream) { + const CUcontext, const CUdevice) { return StatusCode::kNotImplemented; } template StatusCode PUBLIC_API Rotg(CUdeviceptr, const size_t, CUdeviceptr, const size_t, CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Rotg(CUdeviceptr, const size_t, CUdeviceptr, const size_t, CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Generate modified givens plane rotation: SROTMG/DROTMG template @@ -51,7 +51,7 @@ StatusCode Rotmg(CUdeviceptr, const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream* stream) { + const CUcontext, const CUdevice) { return StatusCode::kNotImplemented; } template StatusCode PUBLIC_API Rotmg(CUdeviceptr, const size_t, @@ -59,13 +59,13 @@ template StatusCode PUBLIC_API Rotmg(CUdeviceptr, const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Rotmg(CUdeviceptr, const size_t, CUdeviceptr, const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Apply givens plane rotation: SROT/DROT template @@ -74,7 +74,7 @@ StatusCode Rot(const size_t, CUdeviceptr, const size_t, const size_t, const T, const T, - CUstream* stream) { + const CUcontext, const CUdevice) { return StatusCode::kNotImplemented; } template StatusCode PUBLIC_API Rot(const size_t, @@ -82,13 +82,13 @@ template StatusCode PUBLIC_API Rot(const size_t, CUdeviceptr, const size_t, const size_t, const float, const float, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Rot(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, const double, const double, - CUstream*); + const CUcontext, const CUdevice); // Apply modified givens plane rotation: SROTM/DROTM template @@ -96,28 +96,30 @@ StatusCode Rotm(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream* stream) { + const CUcontext, const CUdevice) { return StatusCode::kNotImplemented; } template StatusCode PUBLIC_API Rotm(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Rotm(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Swap two vectors: SSWAP/DSWAP/CSWAP/ZSWAP/HSWAP template StatusCode Swap(const size_t n, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = 
Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xswap(queue_cpp, event); routine.DoSwap(n, Buffer(x_buffer), x_offset, x_inc, @@ -128,32 +130,34 @@ StatusCode Swap(const size_t n, template StatusCode PUBLIC_API Swap(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Swap(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Swap(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Swap(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Swap(const size_t, CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Vector scaling: SSCAL/DSCAL/CSCAL/ZSCAL/HSCAL template StatusCode Scal(const size_t n, const T alpha, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xscal(queue_cpp, event); routine.DoScal(n, alpha, @@ -164,32 +168,34 @@ StatusCode Scal(const size_t n, template StatusCode PUBLIC_API Scal(const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Scal(const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Scal(const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Scal(const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Scal(const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Vector copy: SCOPY/DCOPY/CCOPY/ZCOPY/HCOPY template StatusCode Copy(const size_t n, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xcopy(queue_cpp, event); routine.DoCopy(n, Buffer(x_buffer), x_offset, x_inc, @@ -200,23 +206,23 @@ StatusCode Copy(const size_t n, template StatusCode PUBLIC_API Copy(const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Copy(const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Copy(const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const 
size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Copy(const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Copy(const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Vector-times-constant plus vector: SAXPY/DAXPY/CAXPY/ZAXPY/HAXPY template @@ -224,9 +230,11 @@ StatusCode Axpy(const size_t n, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xaxpy(queue_cpp, event); routine.DoAxpy(n, alpha, @@ -239,27 +247,27 @@ template StatusCode PUBLIC_API Axpy(const size_t, const float, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Axpy(const size_t, const double, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Axpy(const size_t, const float2, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Axpy(const size_t, const double2, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Axpy(const size_t, const half, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Dot product of two vectors: SDOT/DDOT/HDOT template @@ -267,9 +275,11 @@ StatusCode Dot(const size_t n, CUdeviceptr dot_buffer, const size_t dot_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xdot(queue_cpp, event); routine.DoDot(n, Buffer(dot_buffer), dot_offset, @@ -282,17 +292,17 @@ template StatusCode PUBLIC_API Dot(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Dot(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Dot(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Dot product of two complex vectors: CDOTU/ZDOTU template @@ -300,9 +310,11 @@ StatusCode Dotu(const size_t n, CUdeviceptr dot_buffer, const size_t 
dot_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xdotu(queue_cpp, event); routine.DoDotu(n, Buffer(dot_buffer), dot_offset, @@ -315,12 +327,12 @@ template StatusCode PUBLIC_API Dotu(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Dotu(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Dot product of two complex vectors, one conjugated: CDOTC/ZDOTC template @@ -328,9 +340,11 @@ StatusCode Dotc(const size_t n, CUdeviceptr dot_buffer, const size_t dot_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xdotc(queue_cpp, event); routine.DoDotc(n, Buffer(dot_buffer), dot_offset, @@ -343,21 +357,23 @@ template StatusCode PUBLIC_API Dotc(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Dotc(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Euclidian norm of a vector: SNRM2/DNRM2/ScNRM2/DzNRM2/HNRM2 template StatusCode Nrm2(const size_t n, CUdeviceptr nrm2_buffer, const size_t nrm2_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xnrm2(queue_cpp, event); routine.DoNrm2(n, Buffer(nrm2_buffer), nrm2_offset, @@ -368,32 +384,34 @@ StatusCode Nrm2(const size_t n, template StatusCode PUBLIC_API Nrm2(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Nrm2(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Nrm2(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Nrm2(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Nrm2(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const 
CUcontext, const CUdevice); // Absolute sum of values in a vector: SASUM/DASUM/ScASUM/DzASUM/HASUM template StatusCode Asum(const size_t n, CUdeviceptr asum_buffer, const size_t asum_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xasum(queue_cpp, event); routine.DoAsum(n, Buffer(asum_buffer), asum_offset, @@ -404,32 +422,34 @@ StatusCode Asum(const size_t n, template StatusCode PUBLIC_API Asum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Asum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Asum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Asum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Asum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Sum of values in a vector (non-BLAS function): SSUM/DSUM/ScSUM/DzSUM/HSUM template StatusCode Sum(const size_t n, CUdeviceptr sum_buffer, const size_t sum_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xsum(queue_cpp, event); routine.DoSum(n, Buffer(sum_buffer), sum_offset, @@ -440,32 +460,34 @@ StatusCode Sum(const size_t n, template StatusCode PUBLIC_API Sum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Sum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Sum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Sum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Sum(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Index of absolute maximum value in a vector: iSAMAX/iDAMAX/iCAMAX/iZAMAX/iHAMAX template StatusCode Amax(const size_t n, CUdeviceptr imax_buffer, const size_t imax_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xamax(queue_cpp, 
event); routine.DoAmax(n, Buffer(imax_buffer), imax_offset, @@ -476,32 +498,34 @@ StatusCode Amax(const size_t n, template StatusCode PUBLIC_API Amax(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Amax(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Amax(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Amax(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Amax(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Index of absolute minimum value in a vector (non-BLAS function): iSAMIN/iDAMIN/iCAMIN/iZAMIN/iHAMIN template StatusCode Amin(const size_t n, CUdeviceptr imin_buffer, const size_t imin_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xamin(queue_cpp, event); routine.DoAmin(n, Buffer(imin_buffer), imin_offset, @@ -512,32 +536,34 @@ StatusCode Amin(const size_t n, template StatusCode PUBLIC_API Amin(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Amin(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Amin(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Amin(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Amin(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Index of maximum value in a vector (non-BLAS function): iSMAX/iDMAX/iCMAX/iZMAX/iHMAX template StatusCode Max(const size_t n, CUdeviceptr imax_buffer, const size_t imax_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xmax(queue_cpp, event); routine.DoMax(n, Buffer(imax_buffer), imax_offset, @@ -548,32 +574,34 @@ StatusCode Max(const size_t n, template StatusCode PUBLIC_API Max(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Max(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Max(const 
size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Max(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Max(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Index of minimum value in a vector (non-BLAS function): iSMIN/iDMIN/iCMIN/iZMIN/iHMIN template StatusCode Min(const size_t n, CUdeviceptr imin_buffer, const size_t imin_offset, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xmin(queue_cpp, event); routine.DoMin(n, Buffer(imin_buffer), imin_offset, @@ -584,23 +612,23 @@ StatusCode Min(const size_t n, template StatusCode PUBLIC_API Min(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Min(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Min(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Min(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Min(const size_t, CUdeviceptr, const size_t, const CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // ================================================================================================= // BLAS level-2 (matrix-vector) routines @@ -615,9 +643,11 @@ StatusCode Gemv(const Layout layout, const Transpose a_transpose, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xgemv(queue_cpp, event); routine.DoGemv(layout, a_transpose, m, n, @@ -636,7 +666,7 @@ template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const size_t, const size_t, const double, @@ -644,7 +674,7 @@ template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const size_t, const size_t, const float2, @@ -652,7 +682,7 @@ template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const float2, 
CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const size_t, const size_t, const double2, @@ -660,7 +690,7 @@ template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const size_t, const size_t, const half, @@ -668,7 +698,7 @@ template StatusCode PUBLIC_API Gemv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // General banded matrix-vector multiplication: SGBMV/DGBMV/CGBMV/ZGBMV/HGBMV template @@ -679,9 +709,11 @@ StatusCode Gbmv(const Layout layout, const Transpose a_transpose, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xgbmv(queue_cpp, event); routine.DoGbmv(layout, a_transpose, m, n, kl, ku, @@ -700,7 +732,7 @@ template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const size_t, const size_t, const size_t, const size_t, const double, @@ -708,7 +740,7 @@ template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const size_t, const size_t, const size_t, const size_t, const float2, @@ -716,7 +748,7 @@ template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const size_t, const size_t, const size_t, const size_t, const double2, @@ -724,7 +756,7 @@ template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const size_t, const size_t, const size_t, const size_t, const half, @@ -732,7 +764,7 @@ template StatusCode PUBLIC_API Gbmv(const Layout, const Transpose, const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Hermitian matrix-vector multiplication: CHEMV/ZHEMV template @@ -743,9 +775,11 @@ StatusCode Hemv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice 
device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xhemv(queue_cpp, event); routine.DoHemv(layout, triangle, n, @@ -764,7 +798,7 @@ template StatusCode PUBLIC_API Hemv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Hemv(const Layout, const Triangle, const size_t, const double2, @@ -772,7 +806,7 @@ template StatusCode PUBLIC_API Hemv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Hermitian banded matrix-vector multiplication: CHBMV/ZHBMV template @@ -783,9 +817,11 @@ StatusCode Hbmv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xhbmv(queue_cpp, event); routine.DoHbmv(layout, triangle, n, k, @@ -804,7 +840,7 @@ template StatusCode PUBLIC_API Hbmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Hbmv(const Layout, const Triangle, const size_t, const size_t, const double2, @@ -812,7 +848,7 @@ template StatusCode PUBLIC_API Hbmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Hermitian packed matrix-vector multiplication: CHPMV/ZHPMV template @@ -823,9 +859,11 @@ StatusCode Hpmv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xhpmv(queue_cpp, event); routine.DoHpmv(layout, triangle, n, @@ -844,7 +882,7 @@ template StatusCode PUBLIC_API Hpmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Hpmv(const Layout, const Triangle, const size_t, const double2, @@ -852,7 +890,7 @@ template StatusCode PUBLIC_API Hpmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Symmetric matrix-vector multiplication: SSYMV/DSYMV/HSYMV template @@ -863,9 +901,11 @@ StatusCode Symv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const 
size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xsymv(queue_cpp, event); routine.DoSymv(layout, triangle, n, @@ -884,7 +924,7 @@ template StatusCode PUBLIC_API Symv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Symv(const Layout, const Triangle, const size_t, const double, @@ -892,7 +932,7 @@ template StatusCode PUBLIC_API Symv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Symv(const Layout, const Triangle, const size_t, const half, @@ -900,7 +940,7 @@ template StatusCode PUBLIC_API Symv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Symmetric banded matrix-vector multiplication: SSBMV/DSBMV/HSBMV template @@ -911,9 +951,11 @@ StatusCode Sbmv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xsbmv(queue_cpp, event); routine.DoSbmv(layout, triangle, n, k, @@ -932,7 +974,7 @@ template StatusCode PUBLIC_API Sbmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Sbmv(const Layout, const Triangle, const size_t, const size_t, const double, @@ -940,7 +982,7 @@ template StatusCode PUBLIC_API Sbmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Sbmv(const Layout, const Triangle, const size_t, const size_t, const half, @@ -948,7 +990,7 @@ template StatusCode PUBLIC_API Sbmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Symmetric packed matrix-vector multiplication: SSPMV/DSPMV/HSPMV template @@ -959,9 +1001,11 @@ StatusCode Spmv(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const T beta, CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xspmv(queue_cpp, event); routine.DoSpmv(layout, triangle, n, @@ -980,7 +1024,7 @@ template StatusCode PUBLIC_API Spmv(const Layout, const Triangle, const CUdeviceptr, 
const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Spmv(const Layout, const Triangle, const size_t, const double, @@ -988,7 +1032,7 @@ template StatusCode PUBLIC_API Spmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Spmv(const Layout, const Triangle, const size_t, const half, @@ -996,7 +1040,7 @@ template StatusCode PUBLIC_API Spmv(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Triangular matrix-vector multiplication: STRMV/DTRMV/CTRMV/ZTRMV/HTRMV template @@ -1004,9 +1048,11 @@ StatusCode Trmv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xtrmv(queue_cpp, event); routine.DoTrmv(layout, triangle, a_transpose, diagonal, n, @@ -1019,27 +1065,27 @@ template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const T const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Triangular banded matrix-vector multiplication: STBMV/DTBMV/CTBMV/ZTBMV/HTBMV template @@ -1047,9 +1093,11 @@ StatusCode Tbmv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const size_t k, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xtbmv(queue_cpp, event); routine.DoTbmv(layout, triangle, a_transpose, diagonal, n, k, @@ -1062,27 +1110,27 @@ template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const T 
const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tbmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Triangular packed matrix-vector multiplication: STPMV/DTPMV/CTPMV/ZTPMV/HTPMV template @@ -1090,9 +1138,11 @@ StatusCode Tpmv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const CUdeviceptr ap_buffer, const size_t ap_offset, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xtpmv(queue_cpp, event); routine.DoTpmv(layout, triangle, a_transpose, diagonal, n, @@ -1105,27 +1155,27 @@ template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const T const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tpmv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Solves a triangular system of equations: STRSV/DTRSV/CTRSV/ZTRSV template @@ -1133,9 +1183,11 @@ StatusCode Trsv(const Layout layout, const Triangle triangle, const Transpose a_ const size_t n, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto 
context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xtrsv(queue_cpp, event); routine.DoTrsv(layout, triangle, a_transpose, diagonal, n, @@ -1148,22 +1200,22 @@ template StatusCode PUBLIC_API Trsv(const Layout, const Triangle, const T const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Solves a banded triangular system of equations: STBSV/DTBSV/CTBSV/ZTBSV template @@ -1171,29 +1223,29 @@ StatusCode Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream* stream) { + const CUcontext, const CUdevice) { return StatusCode::kNotImplemented; } template StatusCode PUBLIC_API Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tbsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Solves a packed triangular system of equations: STPSV/DTPSV/CTPSV/ZTPSV template @@ -1201,29 +1253,29 @@ StatusCode Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream* stream) { + const CUcontext, const CUdevice) { return StatusCode::kNotImplemented; } template StatusCode PUBLIC_API Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode 
PUBLIC_API Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Tpsv(const Layout, const Triangle, const Transpose, const Diagonal, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // General rank-1 matrix update: SGER/DGER/HGER template @@ -1233,9 +1285,11 @@ StatusCode Ger(const Layout layout, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xger(queue_cpp, event); routine.DoGer(layout, m, n, @@ -1252,21 +1306,21 @@ template StatusCode PUBLIC_API Ger(const Layout, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Ger(const Layout, const size_t, const size_t, const double, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Ger(const Layout, const size_t, const size_t, const half, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // General rank-1 complex matrix update: CGERU/ZGERU template @@ -1276,9 +1330,11 @@ StatusCode Geru(const Layout layout, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xgeru(queue_cpp, event); routine.DoGeru(layout, m, n, @@ -1295,14 +1351,14 @@ template StatusCode PUBLIC_API Geru(const Layout, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Geru(const Layout, const size_t, const size_t, const double2, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // General rank-1 complex conjugated matrix update: CGERC/ZGERC template @@ -1312,9 +1368,11 @@ StatusCode Gerc(const Layout layout, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto 
context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xgerc(queue_cpp, event); routine.DoGerc(layout, m, n, @@ -1331,14 +1389,14 @@ template StatusCode PUBLIC_API Gerc(const Layout, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gerc(const Layout, const size_t, const size_t, const double2, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Hermitian rank-1 matrix update: CHER/ZHER template @@ -1347,9 +1405,11 @@ StatusCode Her(const Layout layout, const Triangle triangle, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xher,T>(queue_cpp, event); routine.DoHer(layout, triangle, n, @@ -1364,13 +1424,13 @@ template StatusCode PUBLIC_API Her(const Layout, const Triangle, const float, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Her(const Layout, const Triangle, const size_t, const double, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Hermitian packed rank-1 matrix update: CHPR/ZHPR template @@ -1379,9 +1439,11 @@ StatusCode Hpr(const Layout layout, const Triangle triangle, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr ap_buffer, const size_t ap_offset, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xhpr,T>(queue_cpp, event); routine.DoHpr(layout, triangle, n, @@ -1396,13 +1458,13 @@ template StatusCode PUBLIC_API Hpr(const Layout, const Triangle, const float, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Hpr(const Layout, const Triangle, const size_t, const double, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Hermitian rank-2 matrix update: CHER2/ZHER2 template @@ -1412,9 +1474,11 @@ StatusCode Her2(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xher2(queue_cpp, event); routine.DoHer2(layout, triangle, n, @@ 
-1431,14 +1495,14 @@ template StatusCode PUBLIC_API Her2(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Her2(const Layout, const Triangle, const size_t, const double2, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Hermitian packed rank-2 matrix update: CHPR2/ZHPR2 template @@ -1448,9 +1512,11 @@ StatusCode Hpr2(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr ap_buffer, const size_t ap_offset, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xhpr2(queue_cpp, event); routine.DoHpr2(layout, triangle, n, @@ -1467,14 +1533,14 @@ template StatusCode PUBLIC_API Hpr2(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Hpr2(const Layout, const Triangle, const size_t, const double2, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Symmetric rank-1 matrix update: SSYR/DSYR/HSYR template @@ -1483,9 +1549,11 @@ StatusCode Syr(const Layout layout, const Triangle triangle, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xsyr(queue_cpp, event); routine.DoSyr(layout, triangle, n, @@ -1500,19 +1568,19 @@ template StatusCode PUBLIC_API Syr(const Layout, const Triangle, const float, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syr(const Layout, const Triangle, const size_t, const double, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syr(const Layout, const Triangle, const size_t, const half, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Symmetric packed rank-1 matrix update: SSPR/DSPR/HSPR template @@ -1521,9 +1589,11 @@ StatusCode Spr(const Layout layout, const Triangle triangle, const T alpha, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, CUdeviceptr ap_buffer, const size_t ap_offset, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto 
queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xspr(queue_cpp, event); routine.DoSpr(layout, triangle, n, @@ -1538,19 +1608,19 @@ template StatusCode PUBLIC_API Spr(const Layout, const Triangle, const float, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Spr(const Layout, const Triangle, const size_t, const double, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Spr(const Layout, const Triangle, const size_t, const half, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Symmetric rank-2 matrix update: SSYR2/DSYR2/HSYR2 template @@ -1560,9 +1630,11 @@ StatusCode Syr2(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xsyr2(queue_cpp, event); routine.DoSyr2(layout, triangle, n, @@ -1579,21 +1651,21 @@ template StatusCode PUBLIC_API Syr2(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syr2(const Layout, const Triangle, const size_t, const double, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syr2(const Layout, const Triangle, const size_t, const half, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Symmetric packed rank-2 matrix update: SSPR2/DSPR2/HSPR2 template @@ -1603,9 +1675,11 @@ StatusCode Spr2(const Layout layout, const Triangle triangle, const CUdeviceptr x_buffer, const size_t x_offset, const size_t x_inc, const CUdeviceptr y_buffer, const size_t y_offset, const size_t y_inc, CUdeviceptr ap_buffer, const size_t ap_offset, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xspr2(queue_cpp, event); routine.DoSpr2(layout, triangle, n, @@ -1622,21 +1696,21 @@ template StatusCode PUBLIC_API Spr2(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Spr2(const Layout, const Triangle, const size_t, const double, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Spr2(const Layout, const Triangle, 
const size_t, const half, const CUdeviceptr, const size_t, const size_t, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); // ================================================================================================= // BLAS level-3 (matrix-matrix) routines @@ -1651,9 +1725,11 @@ StatusCode Gemm(const Layout layout, const Transpose a_transpose, const Transpos const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xgemm(queue_cpp, event); routine.DoGemm(layout, a_transpose, b_transpose, m, n, k, @@ -1672,7 +1748,7 @@ template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, const size_t, const size_t, const size_t, const double, @@ -1680,7 +1756,7 @@ template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, const size_t, const size_t, const size_t, const float2, @@ -1688,7 +1764,7 @@ template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, const size_t, const size_t, const size_t, const double2, @@ -1696,7 +1772,7 @@ template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, cons const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const Transpose, const size_t, const size_t, const size_t, const half, @@ -1704,7 +1780,7 @@ template StatusCode PUBLIC_API Gemm(const Layout, const Transpose, const T const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Symmetric matrix-matrix multiplication: SSYMM/DSYMM/CSYMM/ZSYMM/HSYMM template @@ -1715,9 +1791,11 @@ StatusCode Symm(const Layout layout, const Side side, const Triangle triangle, const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xsymm(queue_cpp, event); routine.DoSymm(layout, side, triangle, m, n, @@ -1736,7 +1814,7 @@ template StatusCode PUBLIC_API Symm(const Layout, const Side, const Trian const CUdeviceptr, const size_t, const size_t, const 
float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, const size_t, const size_t, const double, @@ -1744,7 +1822,7 @@ template StatusCode PUBLIC_API Symm(const Layout, const Side, const Tria const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, const size_t, const size_t, const float2, @@ -1752,7 +1830,7 @@ template StatusCode PUBLIC_API Symm(const Layout, const Side, const Tria const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, const size_t, const size_t, const double2, @@ -1760,7 +1838,7 @@ template StatusCode PUBLIC_API Symm(const Layout, const Side, const Tri const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triangle, const size_t, const size_t, const half, @@ -1768,7 +1846,7 @@ template StatusCode PUBLIC_API Symm(const Layout, const Side, const Triang const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Hermitian matrix-matrix multiplication: CHEMM/ZHEMM template @@ -1779,9 +1857,11 @@ StatusCode Hemm(const Layout layout, const Side side, const Triangle triangle, const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xhemm(queue_cpp, event); routine.DoHemm(layout, side, triangle, m, n, @@ -1800,7 +1880,7 @@ template StatusCode PUBLIC_API Hemm(const Layout, const Side, const Tria const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Hemm(const Layout, const Side, const Triangle, const size_t, const size_t, const double2, @@ -1808,7 +1888,7 @@ template StatusCode PUBLIC_API Hemm(const Layout, const Side, const Tri const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Rank-K update of a symmetric matrix: SSYRK/DSYRK/CSYRK/ZSYRK/HSYRK template @@ -1818,9 +1898,11 @@ StatusCode Syrk(const Layout layout, const Triangle triangle, const Transpose a_ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xsyrk(queue_cpp, event); routine.DoSyrk(layout, triangle, a_transpose, n, k, @@ -1837,35 +1919,35 @@ template StatusCode 
PUBLIC_API Syrk(const Layout, const Triangle, const T const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, const size_t, const size_t, const double, const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, const size_t, const size_t, const float2, const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, const size_t, const size_t, const double2, const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syrk(const Layout, const Triangle, const Transpose, const size_t, const size_t, const half, const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Rank-K update of a hermitian matrix: CHERK/ZHERK template @@ -1875,9 +1957,11 @@ StatusCode Herk(const Layout layout, const Triangle triangle, const Transpose a_ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xherk,T>(queue_cpp, event); routine.DoHerk(layout, triangle, a_transpose, n, k, @@ -1894,14 +1978,14 @@ template StatusCode PUBLIC_API Herk(const Layout, const Triangle, const T const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Herk(const Layout, const Triangle, const Transpose, const size_t, const size_t, const double, const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Rank-2K update of a symmetric matrix: SSYR2K/DSYR2K/CSYR2K/ZSYR2K/HSYR2K template @@ -1912,9 +1996,11 @@ StatusCode Syr2k(const Layout layout, const Triangle triangle, const Transpose a const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const T beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xsyr2k(queue_cpp, event); routine.DoSyr2k(layout, triangle, ab_transpose, n, k, @@ -1933,7 +2019,7 @@ template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, const size_t, const size_t, const double, 
@@ -1941,7 +2027,7 @@ template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, const size_t, const size_t, const float2, @@ -1949,7 +2035,7 @@ template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const const CUdeviceptr, const size_t, const size_t, const float2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, const size_t, const size_t, const double2, @@ -1957,7 +2043,7 @@ template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, cons const CUdeviceptr, const size_t, const size_t, const double2, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const Transpose, const size_t, const size_t, const half, @@ -1965,7 +2051,7 @@ template StatusCode PUBLIC_API Syr2k(const Layout, const Triangle, const T const CUdeviceptr, const size_t, const size_t, const half, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Rank-2K update of a hermitian matrix: CHER2K/ZHER2K template @@ -1976,9 +2062,11 @@ StatusCode Her2k(const Layout layout, const Triangle triangle, const Transpose a const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const U beta, CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xher2k(queue_cpp, event); routine.DoHer2k(layout, triangle, ab_transpose, n, k, @@ -1997,7 +2085,7 @@ template StatusCode PUBLIC_API Her2k(const Layout, const Triangle, const CUdeviceptr, const size_t, const size_t, const float, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Her2k(const Layout, const Triangle, const Transpose, const size_t, const size_t, const double2, @@ -2005,7 +2093,7 @@ template StatusCode PUBLIC_API Her2k(const Layout, const Triangl const CUdeviceptr, const size_t, const size_t, const double, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Triangular matrix-matrix multiplication: STRMM/DTRMM/CTRMM/ZTRMM/HTRMM template @@ -2014,9 +2102,11 @@ StatusCode Trmm(const Layout layout, const Side side, const Triangle triangle, c const T alpha, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xtrmm(queue_cpp, event); routine.DoTrmm(layout, side, triangle, a_transpose, diagonal, m, n, @@ -2031,31 +2121,31 @@ template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Trian const float, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const 
CUcontext, const CUdevice); template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const double, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const float2, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const double2, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trmm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const half, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Solves a triangular system of equations: STRSM/DTRSM/CTRSM/ZTRSM template @@ -2064,9 +2154,11 @@ StatusCode Trsm(const Layout layout, const Side side, const Triangle triangle, c const T alpha, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xtrsm(queue_cpp, event); routine.DoTrsm(layout, side, triangle, a_transpose, diagonal, m, n, @@ -2081,25 +2173,25 @@ template StatusCode PUBLIC_API Trsm(const Layout, const Side, const Trian const float, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trsm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const double, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trsm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const float2, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Trsm(const Layout, const Side, const Triangle, const Transpose, const Diagonal, const size_t, const size_t, const double2, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // ================================================================================================= // Extra non-BLAS routines (level-X) @@ -2112,9 +2204,11 @@ StatusCode Omatcopy(const Layout layout, const Transpose a_transpose, const T alpha, const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const 
auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xomatcopy(queue_cpp, event); routine.DoOmatcopy(layout, a_transpose, m, n, @@ -2129,40 +2223,42 @@ template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, const float, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, const size_t, const size_t, const double, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, const size_t, const size_t, const float2, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, const size_t, const size_t, const double2, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Omatcopy(const Layout, const Transpose, const size_t, const size_t, const half, const CUdeviceptr, const size_t, const size_t, CUdeviceptr, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Im2col function (non-BLAS function): SIM2COL/DIM2COL/CIM2COL/ZIM2COL/HIM2COL template StatusCode Im2col(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const CUdeviceptr im_buffer, const size_t im_offset, CUdeviceptr col_buffer, const size_t col_offset, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = Xim2col(queue_cpp, event); routine.DoIm2col(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, Buffer(im_buffer), im_offset, @@ -2173,23 +2269,23 @@ StatusCode Im2col(const size_t channels, const size_t height, const size_t width template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const CUdeviceptr, const 
size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API Im2col(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const CUdeviceptr, const size_t, CUdeviceptr, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED template @@ -2198,9 +2294,11 @@ StatusCode AxpyBatched(const size_t n, const CUdeviceptr x_buffer, const size_t *x_offsets, const size_t x_inc, CUdeviceptr y_buffer, const size_t *y_offsets, const size_t y_inc, const size_t batch_count, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = XaxpyBatched(queue_cpp, event); auto alphas_cpp = std::vector(); auto x_offsets_cpp = std::vector(); @@ -2223,31 +2321,31 @@ template StatusCode PUBLIC_API AxpyBatched(const size_t, const CUdeviceptr, const size_t*, const size_t, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API AxpyBatched(const size_t, const double*, const CUdeviceptr, const size_t*, const size_t, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API AxpyBatched(const size_t, const float2*, const CUdeviceptr, const size_t*, const size_t, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API AxpyBatched(const size_t, const double2*, const CUdeviceptr, const size_t*, const size_t, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API AxpyBatched(const size_t, const half*, const CUdeviceptr, const size_t*, const size_t, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // Batched version of GEMM: SGEMMBATCHED/DGEMMBATCHED/CGEMMBATCHED/ZGEMMBATCHED/HGEMMBATCHED template @@ -2259,9 +2357,11 @@ StatusCode GemmBatched(const Layout layout, const Transpose a_transpose, const T const T *betas, CUdeviceptr c_buffer, const size_t *c_offsets, const size_t c_ld, const size_t batch_count, - CUstream* stream) { + const CUcontext context, const CUdevice device) { try { - auto queue_cpp = Queue(*queue); + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); auto routine = XgemmBatched(queue_cpp, event); auto alphas_cpp = std::vector(); auto betas_cpp = std::vector(); @@ -2294,7 +2394,7 @@ template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const float*, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const Transpose, const size_t, const size_t, const size_t, const double*, @@ -2303,7 +2403,7 @@ template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose const double*, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, 
const Transpose, const size_t, const size_t, const size_t, const float2*, @@ -2312,7 +2412,7 @@ template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose const float2*, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const Transpose, const size_t, const size_t, const size_t, const double2*, @@ -2321,7 +2421,7 @@ template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpos const double2*, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const Transpose, const size_t, const size_t, const size_t, const half*, @@ -2330,7 +2430,7 @@ template StatusCode PUBLIC_API GemmBatched(const Layout, const Transpose, const half*, CUdeviceptr, const size_t*, const size_t, const size_t, - CUstream*); + const CUcontext, const CUdevice); // ================================================================================================= } // namespace clblast diff --git a/src/utilities/buffer_test.hpp b/src/utilities/buffer_test.hpp index a5b6be4b..fd071434 100644 --- a/src/utilities/buffer_test.hpp +++ b/src/utilities/buffer_test.hpp @@ -15,7 +15,7 @@ #ifndef CLBLAST_BUFFER_TEST_H_ #define CLBLAST_BUFFER_TEST_H_ -#include "utilities/utilities.hpp +#include "utilities/utilities.hpp" namespace clblast { // ================================================================================================= -- cgit v1.2.3 From 2d7b648a243a97d18899677a51c9e441d6edf508 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sat, 14 Oct 2017 10:49:25 +0200 Subject: Added OpenCL to CUDA translation header for the kernels --- src/kernels/opencl_to_cuda.h | 51 ++++++++++++++++++++++++++++++++++++++++++++ src/routine.cpp | 7 ++++++ 2 files changed, 58 insertions(+) create mode 100644 src/kernels/opencl_to_cuda.h diff --git a/src/kernels/opencl_to_cuda.h b/src/kernels/opencl_to_cuda.h new file mode 100644 index 00000000..43a26a2f --- /dev/null +++ b/src/kernels/opencl_to_cuda.h @@ -0,0 +1,51 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file contains an (incomplete) header to interpret OpenCL kernels as CUDA kernels. 
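+//
+// For illustration (an editorial example, not part of the original file): given the macro
+// definitions below, an OpenCL kernel signature such as
+//   __kernel void Xaxpy(const int n, __global const float* restrict xgm, __global float* ygm)
+// is seen by the CUDA compiler as
+//   extern "C" __global__ void Xaxpy(const int n, const float* __restrict__ xgm, float* ygm)
+// and a call to barrier(CLK_LOCAL_MEM_FENCE) simply becomes __syncthreads().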
+// +// ================================================================================================= + +// Replaces the OpenCL keywords with CUDA equivalent +#define __kernel __placeholder__ +#define __global +#define __placeholder__ extern "C" __global__ +#define __local __shared__ +#define restrict __restrict__ +#define __constant const +#define inline __device__ inline // assumes all device functions are annotated with inline in OpenCL + +// Replaces OpenCL synchronisation with CUDA synchronisation +#define barrier(x) __syncthreads() + +// Replaces the OpenCL get_xxx_ID with CUDA equivalents +__device__ int get_local_id(int x) { + if (x == 0) { return threadIdx.x; } + if (x == 1) { return threadIdx.y; } + return threadIdx.z; +} +__device__ int get_group_id(int x) { + if (x == 0) { return blockIdx.x; } + if (x == 1) { return blockIdx.y;} + return blockIdx.z; +} +__device__ int get_global_id(int x) { + if (x == 0) { return blockIdx.x*blockDim.x + threadIdx.x; } + if (y == 0) { return blockIdx.y*blockDim.y + threadIdx.y; } + return blockIdx.z*blockDim.z + threadIdx.z; +} + +// Adds the data-types which are not available natively under CUDA +typedef struct { float s0; float s1; float s2; float s3; + float s4; float s5; float s6; float s7; } float8; +typedef struct { float s0; float s1; float s2; float s3; + float s4; float s5; float s6; float s7; + float s8; float s9; float s10; float s11; + float s12; float s13; float s14; float s15; } float16; + +// ================================================================================================= diff --git a/src/routine.cpp b/src/routine.cpp index aaa85fde..0f9fe360 100644 --- a/src/routine.cpp +++ b/src/routine.cpp @@ -167,6 +167,13 @@ void Routine::InitProgram(std::initializer_list source) { source_string += "#define GLOBAL_MEM_FENCE 1\n"; } + // Optionally adds a translation header from OpenCL kernels to CUDA kernels + #ifdef CUDA_API + source_string += + #include "kernels/opencl_to_cuda.h" + ; + #endif + // Loads the common header (typedefs and defines and such) source_string += #include "kernels/common.opencl" -- cgit v1.2.3 From 16b9efd60528ea9230810e6cb6287fe780f02527 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sat, 14 Oct 2017 10:50:28 +0200 Subject: Added first untested CUDA sample --- CMakeLists.txt | 4 +- samples/sgemm_cuda.cpp | 103 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+), 2 deletions(-) create mode 100644 samples/sgemm_cuda.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index a5a41f35..547d13e5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -36,7 +36,7 @@ option(CUDA "Build CLBlast with a CUDA API (beta)" OFF) if(NOT OPENCL AND NOT CUDA) message(FATAL_ERROR "No API selected, choose from OpenCL (-DOPENCL=ON) or CUDA (-DCUDA=ON)") endif() -if(OPENCL AND CUDA) +if(OPENCL AND CUDA) message(FATAL_ERROR "Multiple APIs selected, choose either OpenCL (-DOPENCL=ON -DCUDA=OFF) or CUDA (-DCUDA=ON -DOPENCL=OFF)") endif() if(OPENCL) @@ -202,7 +202,7 @@ if(OPENCL) set(SAMPLE_PROGRAMS_C ${SAMPLE_PROGRAMS_C} sgemm_netlib) endif() elseif(CUDA) - set(SAMPLE_PROGRAMS_CPP ) + set(SAMPLE_PROGRAMS_CPP sgemm_cuda) set(SAMPLE_PROGRAMS_C ) endif() diff --git a/samples/sgemm_cuda.cpp b/samples/sgemm_cuda.cpp new file mode 100644 index 00000000..ed2ad588 --- /dev/null +++ b/samples/sgemm_cuda.cpp @@ -0,0 +1,103 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. 
The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file demonstrates the use of the SGEMM routine with the C++ CUDA API of CLBlast. +// +// Note that this example is meant for illustration purposes only. CLBlast provides other programs +// for performance benchmarking ('client_xxxxx') and for correctness testing ('test_xxxxx'). +// +// ================================================================================================= + +#include +#include +#include + +// Includes the CUDA driver API +#include + +// Includes the CLBlast library +#include + +// ================================================================================================= + +// Example use of the single-precision Xgemm routine SGEMM +int main() { + + // CUDA device selection + const auto device_id = 0; + + // Example SGEMM arguments + const size_t m = 128; + const size_t n = 64; + const size_t k = 512; + const float alpha = 0.7f; + const float beta = 1.0f; + const auto a_ld = k; + const auto b_ld = n; + const auto c_ld = n; + + // Initializes the OpenCL device + CUdevice device; + cuDeviceGet(&device, device_id); + + // Creates the OpenCL context and stream + CUcontext context; + cuCtxCreate(context, 0, device); + CUstream stream; + cuStreamCreate(queue, CU_STREAM_NON_BLOCKING); + + // Populate host matrices with some example data + auto host_a = std::vector(m*k); + auto host_b = std::vector(n*k); + auto host_c = std::vector(m*n); + for (auto &item: host_a) { item = 12.193f; } + for (auto &item: host_b) { item = -8.199f; } + for (auto &item: host_c) { item = 0.0f; } + + // Copy the matrices to the device + CUdeviceptr device_a; + CUdeviceptr device_b; + CUdeviceptr device_c; + cuMemAlloc(device_a, host_a.size()*sizeof(float)); + cuMemAlloc(device_b, host_b.size()*sizeof(float)); + cuMemAlloc(device_c, host_c.size()*sizeof(float)); + cuMemcpyHtoDAsync(device_a, host_a.data()), host_a.size()*sizeof(T), queue); + cuMemcpyHtoDAsync(device_b, host_c.data()), host_b.size()*sizeof(T), queue); + cuMemcpyHtoDAsync(device_c, host_b.data()), host_c.size()*sizeof(T), queue); + + // Start the timer + auto start_time = std::chrono::steady_clock::now(); + + // Call the SGEMM routine. Note that the type of alpha and beta (float) determine the precision. + auto status = clblast::Gemm(clblast::Layout::kRowMajor, + clblast::Transpose::kNo, clblast::Transpose::kNo, + m, n, k, + alpha, + device_a(), 0, a_ld, + device_b(), 0, b_ld, + beta, + device_c(), 0, c_ld, + context, device); + + // Record the execution time + auto elapsed_time = std::chrono::steady_clock::now() - start_time; + auto time_ms = std::chrono::duration(elapsed_time).count(); + + // Example completed. See "clblast_cuda.h" for status codes (0 -> success). 
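The CUDA driver-API calls in this sample are left unchecked for brevity. A minimal sketch of a checking helper (hypothetical, not part of CLBlast or of this sample) could look as follows:

#include <cstdio>
#include <cstdlib>
#include <cuda.h>

// Hypothetical helper: prints the CUDA driver error name and aborts if a call fails.
void CheckCuda(const CUresult status, const char* what) {
  if (status != CUDA_SUCCESS) {
    const char* name = nullptr;
    cuGetErrorName(status, &name);
    std::fprintf(stderr, "%s failed: %s\n", what, name ? name : "unknown error");
    std::exit(EXIT_FAILURE);
  }
}

// Example use in the sample above:
//   CheckCuda(cuMemAlloc(&device_a, host_a.size()*sizeof(float)), "cuMemAlloc");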
+ printf("Completed SGEMM in %.3lf ms with status %d\n", time_ms, static_cast(status)); + + // Clean-up + cuMemFree(device_a); + cuMemFree(device_b); + cuMemFree(device_c); + cuStreamDestroy(stream); + return 0; +} + +// ================================================================================================= -- cgit v1.2.3 From 54d0c440ce84d61db1b462033052dd0f532a40d8 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sat, 14 Oct 2017 11:43:57 +0200 Subject: Various fixes to make the host code and sample compile with the CUDA API --- samples/sgemm_cuda.cpp | 26 +++++----- scripts/generator/generator/cpp.py | 3 +- src/clblast_cuda.cpp | 100 ++++++++++++++++++------------------- src/cupp11.hpp | 10 ++-- src/cxpp11_common.hpp | 1 + src/kernels/opencl_to_cuda.h | 11 ++++ 6 files changed, 84 insertions(+), 67 deletions(-) diff --git a/samples/sgemm_cuda.cpp b/samples/sgemm_cuda.cpp index ed2ad588..f1138316 100644 --- a/samples/sgemm_cuda.cpp +++ b/samples/sgemm_cuda.cpp @@ -19,7 +19,7 @@ #include // Includes the CUDA driver API -#include +#include // Includes the CLBlast library #include @@ -43,14 +43,15 @@ int main() { const auto c_ld = n; // Initializes the OpenCL device + cuInit(0); CUdevice device; cuDeviceGet(&device, device_id); // Creates the OpenCL context and stream CUcontext context; - cuCtxCreate(context, 0, device); + cuCtxCreate(&context, 0, device); CUstream stream; - cuStreamCreate(queue, CU_STREAM_NON_BLOCKING); + cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING); // Populate host matrices with some example data auto host_a = std::vector(m*k); @@ -64,12 +65,12 @@ int main() { CUdeviceptr device_a; CUdeviceptr device_b; CUdeviceptr device_c; - cuMemAlloc(device_a, host_a.size()*sizeof(float)); - cuMemAlloc(device_b, host_b.size()*sizeof(float)); - cuMemAlloc(device_c, host_c.size()*sizeof(float)); - cuMemcpyHtoDAsync(device_a, host_a.data()), host_a.size()*sizeof(T), queue); - cuMemcpyHtoDAsync(device_b, host_c.data()), host_b.size()*sizeof(T), queue); - cuMemcpyHtoDAsync(device_c, host_b.data()), host_c.size()*sizeof(T), queue); + cuMemAlloc(&device_a, host_a.size()*sizeof(float)); + cuMemAlloc(&device_b, host_b.size()*sizeof(float)); + cuMemAlloc(&device_c, host_c.size()*sizeof(float)); + cuMemcpyHtoDAsync(device_a, host_a.data(), host_a.size()*sizeof(float), stream); + cuMemcpyHtoDAsync(device_b, host_c.data(), host_b.size()*sizeof(float), stream); + cuMemcpyHtoDAsync(device_c, host_b.data(), host_c.size()*sizeof(float), stream); // Start the timer auto start_time = std::chrono::steady_clock::now(); @@ -79,11 +80,12 @@ int main() { clblast::Transpose::kNo, clblast::Transpose::kNo, m, n, k, alpha, - device_a(), 0, a_ld, - device_b(), 0, b_ld, + device_a, 0, a_ld, + device_b, 0, b_ld, beta, - device_c(), 0, c_ld, + device_c, 0, c_ld, context, device); + cuStreamSynchronize(stream); // Record the execution time auto elapsed_time = std::chrono::steady_clock::now() - start_time; diff --git a/scripts/generator/generator/cpp.py b/scripts/generator/generator/cpp.py index 5413906a..2d18655f 100644 --- a/scripts/generator/generator/cpp.py +++ b/scripts/generator/generator/cpp.py @@ -56,7 +56,8 @@ def clblast_cc(routine, cuda=False): result += " auto queue_cpp = Queue(context_cpp, device_cpp);" + NL else: result += " auto queue_cpp = Queue(*queue);" + NL - result += " auto routine = X" + routine.plain_name() + "<" + routine.template.template + ">(queue_cpp, event);" + NL + event = "nullptr" if cuda else "event" + result += " auto routine = X" + routine.plain_name() + "<" + 
routine.template.template + ">(queue_cpp, " + event + ");" + NL if routine.batched: result += " " + (NL + " ").join(routine.batched_transform_to_cpp()) + NL result += " routine.Do" + routine.capitalized_name() + "(" diff --git a/src/clblast_cuda.cpp b/src/clblast_cuda.cpp index f9a24236..0e3d949d 100644 --- a/src/clblast_cuda.cpp +++ b/src/clblast_cuda.cpp @@ -120,7 +120,7 @@ StatusCode Swap(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xswap(queue_cpp, event); + auto routine = Xswap(queue_cpp, nullptr); routine.DoSwap(n, Buffer(x_buffer), x_offset, x_inc, Buffer(y_buffer), y_offset, y_inc); @@ -158,7 +158,7 @@ StatusCode Scal(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xscal(queue_cpp, event); + auto routine = Xscal(queue_cpp, nullptr); routine.DoScal(n, alpha, Buffer(x_buffer), x_offset, x_inc); @@ -196,7 +196,7 @@ StatusCode Copy(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xcopy(queue_cpp, event); + auto routine = Xcopy(queue_cpp, nullptr); routine.DoCopy(n, Buffer(x_buffer), x_offset, x_inc, Buffer(y_buffer), y_offset, y_inc); @@ -235,7 +235,7 @@ StatusCode Axpy(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xaxpy(queue_cpp, event); + auto routine = Xaxpy(queue_cpp, nullptr); routine.DoAxpy(n, alpha, Buffer(x_buffer), x_offset, x_inc, @@ -280,7 +280,7 @@ StatusCode Dot(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xdot(queue_cpp, event); + auto routine = Xdot(queue_cpp, nullptr); routine.DoDot(n, Buffer(dot_buffer), dot_offset, Buffer(x_buffer), x_offset, x_inc, @@ -315,7 +315,7 @@ StatusCode Dotu(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xdotu(queue_cpp, event); + auto routine = Xdotu(queue_cpp, nullptr); routine.DoDotu(n, Buffer(dot_buffer), dot_offset, Buffer(x_buffer), x_offset, x_inc, @@ -345,7 +345,7 @@ StatusCode Dotc(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xdotc(queue_cpp, event); + auto routine = Xdotc(queue_cpp, nullptr); routine.DoDotc(n, Buffer(dot_buffer), dot_offset, Buffer(x_buffer), x_offset, x_inc, @@ -374,7 +374,7 @@ StatusCode Nrm2(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xnrm2(queue_cpp, event); + auto routine = Xnrm2(queue_cpp, nullptr); routine.DoNrm2(n, Buffer(nrm2_buffer), nrm2_offset, Buffer(x_buffer), x_offset, x_inc); @@ -412,7 +412,7 @@ StatusCode Asum(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xasum(queue_cpp, event); + auto routine = Xasum(queue_cpp, nullptr); routine.DoAsum(n, Buffer(asum_buffer), asum_offset, Buffer(x_buffer), x_offset, x_inc); @@ -450,7 +450,7 @@ StatusCode 
Sum(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xsum(queue_cpp, event); + auto routine = Xsum(queue_cpp, nullptr); routine.DoSum(n, Buffer(sum_buffer), sum_offset, Buffer(x_buffer), x_offset, x_inc); @@ -488,7 +488,7 @@ StatusCode Amax(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xamax(queue_cpp, event); + auto routine = Xamax(queue_cpp, nullptr); routine.DoAmax(n, Buffer(imax_buffer), imax_offset, Buffer(x_buffer), x_offset, x_inc); @@ -526,7 +526,7 @@ StatusCode Amin(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xamin(queue_cpp, event); + auto routine = Xamin(queue_cpp, nullptr); routine.DoAmin(n, Buffer(imin_buffer), imin_offset, Buffer(x_buffer), x_offset, x_inc); @@ -564,7 +564,7 @@ StatusCode Max(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xmax(queue_cpp, event); + auto routine = Xmax(queue_cpp, nullptr); routine.DoMax(n, Buffer(imax_buffer), imax_offset, Buffer(x_buffer), x_offset, x_inc); @@ -602,7 +602,7 @@ StatusCode Min(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xmin(queue_cpp, event); + auto routine = Xmin(queue_cpp, nullptr); routine.DoMin(n, Buffer(imin_buffer), imin_offset, Buffer(x_buffer), x_offset, x_inc); @@ -648,7 +648,7 @@ StatusCode Gemv(const Layout layout, const Transpose a_transpose, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xgemv(queue_cpp, event); + auto routine = Xgemv(queue_cpp, nullptr); routine.DoGemv(layout, a_transpose, m, n, alpha, @@ -714,7 +714,7 @@ StatusCode Gbmv(const Layout layout, const Transpose a_transpose, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xgbmv(queue_cpp, event); + auto routine = Xgbmv(queue_cpp, nullptr); routine.DoGbmv(layout, a_transpose, m, n, kl, ku, alpha, @@ -780,7 +780,7 @@ StatusCode Hemv(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xhemv(queue_cpp, event); + auto routine = Xhemv(queue_cpp, nullptr); routine.DoHemv(layout, triangle, n, alpha, @@ -822,7 +822,7 @@ StatusCode Hbmv(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xhbmv(queue_cpp, event); + auto routine = Xhbmv(queue_cpp, nullptr); routine.DoHbmv(layout, triangle, n, k, alpha, @@ -864,7 +864,7 @@ StatusCode Hpmv(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xhpmv(queue_cpp, event); + auto routine = Xhpmv(queue_cpp, nullptr); routine.DoHpmv(layout, triangle, n, alpha, @@ -906,7 +906,7 @@ StatusCode Symv(const Layout layout, const 
Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xsymv(queue_cpp, event); + auto routine = Xsymv(queue_cpp, nullptr); routine.DoSymv(layout, triangle, n, alpha, @@ -956,7 +956,7 @@ StatusCode Sbmv(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xsbmv(queue_cpp, event); + auto routine = Xsbmv(queue_cpp, nullptr); routine.DoSbmv(layout, triangle, n, k, alpha, @@ -1006,7 +1006,7 @@ StatusCode Spmv(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xspmv(queue_cpp, event); + auto routine = Xspmv(queue_cpp, nullptr); routine.DoSpmv(layout, triangle, n, alpha, @@ -1053,7 +1053,7 @@ StatusCode Trmv(const Layout layout, const Triangle triangle, const Transpose a_ const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xtrmv(queue_cpp, event); + auto routine = Xtrmv(queue_cpp, nullptr); routine.DoTrmv(layout, triangle, a_transpose, diagonal, n, Buffer(a_buffer), a_offset, a_ld, @@ -1098,7 +1098,7 @@ StatusCode Tbmv(const Layout layout, const Triangle triangle, const Transpose a_ const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xtbmv(queue_cpp, event); + auto routine = Xtbmv(queue_cpp, nullptr); routine.DoTbmv(layout, triangle, a_transpose, diagonal, n, k, Buffer(a_buffer), a_offset, a_ld, @@ -1143,7 +1143,7 @@ StatusCode Tpmv(const Layout layout, const Triangle triangle, const Transpose a_ const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xtpmv(queue_cpp, event); + auto routine = Xtpmv(queue_cpp, nullptr); routine.DoTpmv(layout, triangle, a_transpose, diagonal, n, Buffer(ap_buffer), ap_offset, @@ -1188,7 +1188,7 @@ StatusCode Trsv(const Layout layout, const Triangle triangle, const Transpose a_ const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xtrsv(queue_cpp, event); + auto routine = Xtrsv(queue_cpp, nullptr); routine.DoTrsv(layout, triangle, a_transpose, diagonal, n, Buffer(a_buffer), a_offset, a_ld, @@ -1290,7 +1290,7 @@ StatusCode Ger(const Layout layout, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xger(queue_cpp, event); + auto routine = Xger(queue_cpp, nullptr); routine.DoGer(layout, m, n, alpha, @@ -1335,7 +1335,7 @@ StatusCode Geru(const Layout layout, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xgeru(queue_cpp, event); + auto routine = Xgeru(queue_cpp, nullptr); routine.DoGeru(layout, m, n, alpha, @@ -1373,7 +1373,7 @@ StatusCode Gerc(const Layout layout, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xgerc(queue_cpp, event); + auto routine = Xgerc(queue_cpp, nullptr); routine.DoGerc(layout, m, n, 
alpha, @@ -1410,7 +1410,7 @@ StatusCode Her(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xher,T>(queue_cpp, event); + auto routine = Xher,T>(queue_cpp, nullptr); routine.DoHer(layout, triangle, n, alpha, @@ -1444,7 +1444,7 @@ StatusCode Hpr(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xhpr,T>(queue_cpp, event); + auto routine = Xhpr,T>(queue_cpp, nullptr); routine.DoHpr(layout, triangle, n, alpha, @@ -1479,7 +1479,7 @@ StatusCode Her2(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xher2(queue_cpp, event); + auto routine = Xher2(queue_cpp, nullptr); routine.DoHer2(layout, triangle, n, alpha, @@ -1517,7 +1517,7 @@ StatusCode Hpr2(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xhpr2(queue_cpp, event); + auto routine = Xhpr2(queue_cpp, nullptr); routine.DoHpr2(layout, triangle, n, alpha, @@ -1554,7 +1554,7 @@ StatusCode Syr(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xsyr(queue_cpp, event); + auto routine = Xsyr(queue_cpp, nullptr); routine.DoSyr(layout, triangle, n, alpha, @@ -1594,7 +1594,7 @@ StatusCode Spr(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xspr(queue_cpp, event); + auto routine = Xspr(queue_cpp, nullptr); routine.DoSpr(layout, triangle, n, alpha, @@ -1635,7 +1635,7 @@ StatusCode Syr2(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xsyr2(queue_cpp, event); + auto routine = Xsyr2(queue_cpp, nullptr); routine.DoSyr2(layout, triangle, n, alpha, @@ -1680,7 +1680,7 @@ StatusCode Spr2(const Layout layout, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xspr2(queue_cpp, event); + auto routine = Xspr2(queue_cpp, nullptr); routine.DoSpr2(layout, triangle, n, alpha, @@ -1730,7 +1730,7 @@ StatusCode Gemm(const Layout layout, const Transpose a_transpose, const Transpos const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xgemm(queue_cpp, event); + auto routine = Xgemm(queue_cpp, nullptr); routine.DoGemm(layout, a_transpose, b_transpose, m, n, k, alpha, @@ -1796,7 +1796,7 @@ StatusCode Symm(const Layout layout, const Side side, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xsymm(queue_cpp, event); + auto routine = Xsymm(queue_cpp, nullptr); routine.DoSymm(layout, side, triangle, m, n, alpha, @@ -1862,7 +1862,7 @@ StatusCode 
Hemm(const Layout layout, const Side side, const Triangle triangle, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xhemm(queue_cpp, event); + auto routine = Xhemm(queue_cpp, nullptr); routine.DoHemm(layout, side, triangle, m, n, alpha, @@ -1903,7 +1903,7 @@ StatusCode Syrk(const Layout layout, const Triangle triangle, const Transpose a_ const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xsyrk(queue_cpp, event); + auto routine = Xsyrk(queue_cpp, nullptr); routine.DoSyrk(layout, triangle, a_transpose, n, k, alpha, @@ -1962,7 +1962,7 @@ StatusCode Herk(const Layout layout, const Triangle triangle, const Transpose a_ const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xherk,T>(queue_cpp, event); + auto routine = Xherk,T>(queue_cpp, nullptr); routine.DoHerk(layout, triangle, a_transpose, n, k, alpha, @@ -2001,7 +2001,7 @@ StatusCode Syr2k(const Layout layout, const Triangle triangle, const Transpose a const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xsyr2k(queue_cpp, event); + auto routine = Xsyr2k(queue_cpp, nullptr); routine.DoSyr2k(layout, triangle, ab_transpose, n, k, alpha, @@ -2067,7 +2067,7 @@ StatusCode Her2k(const Layout layout, const Triangle triangle, const Transpose a const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xher2k(queue_cpp, event); + auto routine = Xher2k(queue_cpp, nullptr); routine.DoHer2k(layout, triangle, ab_transpose, n, k, alpha, @@ -2107,7 +2107,7 @@ StatusCode Trmm(const Layout layout, const Side side, const Triangle triangle, c const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xtrmm(queue_cpp, event); + auto routine = Xtrmm(queue_cpp, nullptr); routine.DoTrmm(layout, side, triangle, a_transpose, diagonal, m, n, alpha, @@ -2159,7 +2159,7 @@ StatusCode Trsm(const Layout layout, const Side side, const Triangle triangle, c const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xtrsm(queue_cpp, event); + auto routine = Xtrsm(queue_cpp, nullptr); routine.DoTrsm(layout, side, triangle, a_transpose, diagonal, m, n, alpha, @@ -2209,7 +2209,7 @@ StatusCode Omatcopy(const Layout layout, const Transpose a_transpose, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xomatcopy(queue_cpp, event); + auto routine = Xomatcopy(queue_cpp, nullptr); routine.DoOmatcopy(layout, a_transpose, m, n, alpha, @@ -2259,7 +2259,7 @@ StatusCode Im2col(const size_t channels, const size_t height, const size_t width const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = Xim2col(queue_cpp, event); + auto routine = Xim2col(queue_cpp, nullptr); routine.DoIm2col(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, Buffer(im_buffer), im_offset, Buffer(col_buffer), col_offset); @@ -2299,7 
+2299,7 @@ StatusCode AxpyBatched(const size_t n, const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = XaxpyBatched(queue_cpp, event); + auto routine = XaxpyBatched(queue_cpp, nullptr); auto alphas_cpp = std::vector(); auto x_offsets_cpp = std::vector(); auto y_offsets_cpp = std::vector(); @@ -2362,7 +2362,7 @@ StatusCode GemmBatched(const Layout layout, const Transpose a_transpose, const T const auto context_cpp = Context(context); const auto device_cpp = Device(device); auto queue_cpp = Queue(context_cpp, device_cpp); - auto routine = XgemmBatched(queue_cpp, event); + auto routine = XgemmBatched(queue_cpp, nullptr); auto alphas_cpp = std::vector(); auto betas_cpp = std::vector(); auto a_offsets_cpp = std::vector(); diff --git a/src/cupp11.hpp b/src/cupp11.hpp index 988366ea..854c0be9 100644 --- a/src/cupp11.hpp +++ b/src/cupp11.hpp @@ -370,6 +370,8 @@ using ContextPointer = CUcontext*; // C++11 version of 'nvrtcProgram'. Additionally holds the program's source code. class Program { public: + Program() = default; + // Note that there is no constructor based on the regular CUDA data-type because of extra state // Source-based constructor with memory management @@ -404,7 +406,7 @@ public: // Confirms whether a certain status code is an actual compilation error or warning bool StatusIsCompilationWarningOrError(const nvrtcResult status) const { - return (status == NVRTC_ERROR_INVALID_INPUT); + return (status == NVRTC_ERROR_COMPILATION); } // Retrieves the warning/error message from the compiler (if any) @@ -433,8 +435,8 @@ public: const nvrtcProgram& operator()() const { return *program_; } private: std::shared_ptr program_; - const std::string source_; - const bool from_binary_; + std::string source_; + bool from_binary_; }; // ================================================================================================= @@ -730,7 +732,7 @@ public: // TODO: Implement this function void Launch(const Queue &queue, const std::vector &global, const std::vector &local, EventPointer event, - std::vector& waitForEvents) { + const std::vector& waitForEvents) { if (local.size() == 0) { throw LogicError("Kernel: launching with a default workgroup size is not implemented for the CUDA back-end"); } diff --git a/src/cxpp11_common.hpp b/src/cxpp11_common.hpp index 6ac008be..5097eac4 100644 --- a/src/cxpp11_common.hpp +++ b/src/cxpp11_common.hpp @@ -15,6 +15,7 @@ #ifndef CLBLAST_CXPP11_COMMON_H_ #define CLBLAST_CXPP11_COMMON_H_ +#include // strchr #include // std::string #include // std::runtime_error diff --git a/src/kernels/opencl_to_cuda.h b/src/kernels/opencl_to_cuda.h index 43a26a2f..2e46bc2b 100644 --- a/src/kernels/opencl_to_cuda.h +++ b/src/kernels/opencl_to_cuda.h @@ -11,6 +11,11 @@ // // ================================================================================================= +// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string +// literal). Comment-out this line for syntax-highlighting when developing. 
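In other words, wrapping the whole file in R"( ... )" turns a plain #include into a string-literal paste. A small self-contained sketch of the effect (with simplified stand-in contents written inline here instead of via an actual #include of the header):

#include <iostream>
#include <string>

int main() {
  std::string source_string;
  // What the 'source_string += #include "kernels/opencl_to_cuda.h" ;' construct in
  // routine.cpp effectively expands to: the included file's first line opens a raw
  // string literal and its last line closes it, so the whole file becomes one
  // string that is appended here.
  source_string +=
    R"(
    #define __kernel extern "C" __global__
    #define barrier(x) __syncthreads()
    )";
  std::cout << source_string << std::endl;
  return 0;
}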
+R"( +// ================================================================================================= + // Replaces the OpenCL keywords with CUDA equivalent #define __kernel __placeholder__ #define __global @@ -49,3 +54,9 @@ typedef struct { float s0; float s1; float s2; float s3; float s12; float s13; float s14; float s15; } float16; // ================================================================================================= + +// End of the C++11 raw string literal +)" + +// ================================================================================================= + -- cgit v1.2.3 From 74d6e0048cfcdfd65ab29db47f5b4ffafba0bd51 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sat, 14 Oct 2017 12:23:35 +0200 Subject: Added DAXPY example for the CUDA API --- CMakeLists.txt | 2 +- samples/daxpy_cuda.cpp | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++ samples/sgemm_cuda.cpp | 4 +-- 3 files changed, 91 insertions(+), 3 deletions(-) create mode 100644 samples/daxpy_cuda.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 547d13e5..d4e47215 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -202,7 +202,7 @@ if(OPENCL) set(SAMPLE_PROGRAMS_C ${SAMPLE_PROGRAMS_C} sgemm_netlib) endif() elseif(CUDA) - set(SAMPLE_PROGRAMS_CPP sgemm_cuda) + set(SAMPLE_PROGRAMS_CPP daxpy_cuda sgemm_cuda) set(SAMPLE_PROGRAMS_C ) endif() diff --git a/samples/daxpy_cuda.cpp b/samples/daxpy_cuda.cpp new file mode 100644 index 00000000..cead3f6d --- /dev/null +++ b/samples/daxpy_cuda.cpp @@ -0,0 +1,88 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file demonstrates the use of the DAXPY routine with the C++ CUDA API of CLBlast. +// +// Note that this example is meant for illustration purposes only. CLBlast provides other programs +// for performance benchmarking ('client_xxxxx') and for correctness testing ('test_xxxxx'). 
+// +// ================================================================================================= + +#include +#include +#include + +// Includes the CUDA driver API +#include + +// Includes the CLBlast library +#include + +// ================================================================================================= + +// Example use of the double-precision Xaxpy routine DAXPY +int main() { + + // CUDA device selection + const auto device_id = 0; + + // Example DAXPY arguments + const size_t n = 8192; + const double alpha = 0.7; + + // Initializes the OpenCL device + cuInit(0); + CUdevice device; + cuDeviceGet(&device, device_id); + + // Creates the OpenCL context and stream + CUcontext context; + cuCtxCreate(&context, 0, device); + CUstream stream; + cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING); + + // Populate host matrices with some example data + auto host_a = std::vector(n); + auto host_b = std::vector(n); + for (auto &item: host_a) { item = 12.193; } + for (auto &item: host_b) { item = -8.199; } + + // Copy the matrices to the device + CUdeviceptr device_a; + CUdeviceptr device_b; + cuMemAlloc(&device_a, host_a.size()*sizeof(double)); + cuMemAlloc(&device_b, host_b.size()*sizeof(double)); + cuMemcpyHtoDAsync(device_a, host_a.data(), host_a.size()*sizeof(double), stream); + cuMemcpyHtoDAsync(device_b, host_b.data(), host_b.size()*sizeof(double), stream); + + // Start the timer + auto start_time = std::chrono::steady_clock::now(); + + // Call the DAXPY routine. Note that the type of alpha (double) determines the precision. + const auto status = clblast::Axpy(n, alpha, + device_a, 0, 1, + device_b, 0, 1, + context, device); + cuStreamSynchronize(stream); + + // Record the execution time + auto elapsed_time = std::chrono::steady_clock::now() - start_time; + auto time_ms = std::chrono::duration(elapsed_time).count(); + + // Example completed. See "clblast_cuda.h" for status codes (0 -> success). 
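As a possible extension of this sample (a sketch only, reusing the sample's host_b, device_b and stream variables; not part of the sample as committed), the result vector could be copied back and spot-checked on the host:

  // Copy y back to the host and verify one element of y = alpha * x + y.
  cuMemcpyDtoHAsync(host_b.data(), device_b, host_b.size()*sizeof(double), stream);
  cuStreamSynchronize(stream);
  const auto expected = alpha * 12.193 + (-8.199);
  printf("y[0] = %.6lf (expected %.6lf)\n", host_b[0], expected);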
+ printf("Completed DAXPY in %.3lf ms with status %d\n", time_ms, static_cast(status)); + + // Clean-up + cuMemFree(device_a); + cuMemFree(device_b); + cuStreamDestroy(stream); + return 0; +} + +// ================================================================================================= diff --git a/samples/sgemm_cuda.cpp b/samples/sgemm_cuda.cpp index f1138316..8e4397df 100644 --- a/samples/sgemm_cuda.cpp +++ b/samples/sgemm_cuda.cpp @@ -69,8 +69,8 @@ int main() { cuMemAlloc(&device_b, host_b.size()*sizeof(float)); cuMemAlloc(&device_c, host_c.size()*sizeof(float)); cuMemcpyHtoDAsync(device_a, host_a.data(), host_a.size()*sizeof(float), stream); - cuMemcpyHtoDAsync(device_b, host_c.data(), host_b.size()*sizeof(float), stream); - cuMemcpyHtoDAsync(device_c, host_b.data(), host_c.size()*sizeof(float), stream); + cuMemcpyHtoDAsync(device_b, host_b.data(), host_b.size()*sizeof(float), stream); + cuMemcpyHtoDAsync(device_c, host_c.data(), host_c.size()*sizeof(float), stream); // Start the timer auto start_time = std::chrono::steady_clock::now(); -- cgit v1.2.3 From 313fc796b2a3063cab7b5847864a524efb69aee4 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sat, 14 Oct 2017 16:01:12 +0200 Subject: Fixed several (not all) CUDA kernel compilation issues --- src/kernels/common.opencl | 27 +++++++++++++----------- src/kernels/opencl_to_cuda.h | 49 +++++++++++++++++++++++++++++--------------- 2 files changed, 48 insertions(+), 28 deletions(-) diff --git a/src/kernels/common.opencl b/src/kernels/common.opencl index 9481881e..a34877d9 100644 --- a/src/kernels/common.opencl +++ b/src/kernels/common.opencl @@ -23,15 +23,18 @@ R"( #endif // ================================================================================================= +#ifndef CUDA -// Enable support for double-precision -#if PRECISION == 16 - #pragma OPENCL EXTENSION cl_khr_fp16: enable -#endif + // Enable support for double-precision + #if PRECISION == 16 + #pragma OPENCL EXTENSION cl_khr_fp16: enable + #endif + + // Enable support for double-precision + #if PRECISION == 64 || PRECISION == 6464 + #pragma OPENCL EXTENSION cl_khr_fp64: enable + #endif -// Enable support for double-precision -#if PRECISION == 64 || PRECISION == 6464 - #pragma OPENCL EXTENSION cl_khr_fp64: enable #endif // Half-precision @@ -254,18 +257,18 @@ R"( // http://docs.nvidia.com/cuda/samples/6_Advanced/transpose/doc/MatrixTranspose.pdf // More details: https://github.com/CNugteren/CLBlast/issues/53 #if USE_STAGGERED_INDICES == 1 - INLINE_FUNC size_t GetGroupIDFlat() { + INLINE_FUNC int GetGroupIDFlat() { return get_group_id(0) + get_num_groups(0) * get_group_id(1); } - INLINE_FUNC size_t GetGroupID1() { + INLINE_FUNC int GetGroupID1() { return (GetGroupIDFlat()) % get_num_groups(1); } - INLINE_FUNC size_t GetGroupID0() { + INLINE_FUNC int GetGroupID0() { return ((GetGroupIDFlat() / get_num_groups(1)) + GetGroupID1()) % get_num_groups(0); } #else - INLINE_FUNC size_t GetGroupID1() { return get_group_id(1); } - INLINE_FUNC size_t GetGroupID0() { return get_group_id(0); } + INLINE_FUNC int GetGroupID1() { return get_group_id(1); } + INLINE_FUNC int GetGroupID0() { return get_group_id(0); } #endif // ================================================================================================= diff --git a/src/kernels/opencl_to_cuda.h b/src/kernels/opencl_to_cuda.h index 2e46bc2b..94a1549e 100644 --- a/src/kernels/opencl_to_cuda.h +++ b/src/kernels/opencl_to_cuda.h @@ -16,32 +16,28 @@ R"( // 
================================================================================================= -// Replaces the OpenCL keywords with CUDA equivalent -#define __kernel __placeholder__ -#define __global -#define __placeholder__ extern "C" __global__ -#define __local __shared__ -#define restrict __restrict__ -#define __constant const -#define inline __device__ inline // assumes all device functions are annotated with inline in OpenCL - -// Replaces OpenCL synchronisation with CUDA synchronisation -#define barrier(x) __syncthreads() +// CLBlast specific additions +#define CUDA 1 // Replaces the OpenCL get_xxx_ID with CUDA equivalents -__device__ int get_local_id(int x) { +__device__ int get_local_id(const int x) { if (x == 0) { return threadIdx.x; } if (x == 1) { return threadIdx.y; } return threadIdx.z; } -__device__ int get_group_id(int x) { +__device__ int get_group_id(const int x) { if (x == 0) { return blockIdx.x; } - if (x == 1) { return blockIdx.y;} + if (x == 1) { return blockIdx.y; } return blockIdx.z; } -__device__ int get_global_id(int x) { +__device__ int get_global_size(const int x) { + if (x == 0) { return gridDim.x; } + if (x == 1) { return gridDim.y; } + return gridDim.z; +} +__device__ int get_global_id(const int x) { if (x == 0) { return blockIdx.x*blockDim.x + threadIdx.x; } - if (y == 0) { return blockIdx.y*blockDim.y + threadIdx.y; } + if (x == 1) { return blockIdx.y*blockDim.y + threadIdx.y; } return blockIdx.z*blockDim.z + threadIdx.z; } @@ -52,6 +48,27 @@ typedef struct { float s0; float s1; float s2; float s3; float s4; float s5; float s6; float s7; float s8; float s9; float s10; float s11; float s12; float s13; float s14; float s15; } float16; +typedef struct { double s0; double s1; double s2; double s3; + double s4; double s5; double s6; double s7; } double8; +typedef struct { double s0; double s1; double s2; double s3; + double s4; double s5; double s6; double s7; + double s8; double s9; double s10; double s11; + double s12; double s13; double s14; double s15; } double16; + +// Replaces the OpenCL keywords with CUDA equivalent +#define __kernel __placeholder__ +#define __global +#define __placeholder__ extern "C" __global__ +#define __local __shared__ +#define restrict __restrict__ +#define __constant const +#define inline __device__ // assumes all device functions are annotated with inline in OpenCL + +// Kernel attributes (don't replace currently) +#define reqd_work_group_size(x, y, z) + +// Replaces OpenCL synchronisation with CUDA synchronisation +#define barrier(x) __syncthreads() // ================================================================================================= -- cgit v1.2.3 From d9456306e0ee16f18f788106cd8ba74a81bf2e31 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sat, 14 Oct 2017 16:48:06 +0200 Subject: Made transpose kernel struct init proper according to the C standard --- src/kernels/level3/transpose_fast.opencl | 60 ++++++++++++++++---------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/src/kernels/level3/transpose_fast.opencl b/src/kernels/level3/transpose_fast.opencl index 70156d3a..5f9ba209 100644 --- a/src/kernels/level3/transpose_fast.opencl +++ b/src/kernels/level3/transpose_fast.opencl @@ -84,39 +84,39 @@ void TransposeMatrixFast(const int ld, #if TRA_WPT == 1 results[0] = v[0]; #elif TRA_WPT == 2 - results[0] = (realT) {v[0].x, v[1].x}; - results[1] = (realT) {v[0].y, v[1].y}; + results[0].x = v[0].x; results[0].y = v[1].x; + results[1].x = v[0].y; results[1].y = v[1].y; #elif TRA_WPT == 4 - 
results[0] = (realT) {v[0].x, v[1].x, v[2].x, v[3].x}; - results[1] = (realT) {v[0].y, v[1].y, v[2].y, v[3].y}; - results[2] = (realT) {v[0].z, v[1].z, v[2].z, v[3].z}; - results[3] = (realT) {v[0].w, v[1].w, v[2].w, v[3].w}; + results[0].x = v[0].x; results[0].y; = v[1].x; results[0].z = v[2].x; results[0].w = v[3].x; + results[1].x = v[0].y; results[1].y; = v[1].y; results[1].z = v[2].y; results[1].w = v[3].y; + results[2].x = v[0].z; results[2].y; = v[1].z; results[2].z = v[2].z; results[2].w = v[3].z; + results[3].x = v[0].w; results[3].y; = v[1].w; results[3].z = v[2].w; results[3].w = v[3].w; #elif TRA_WPT == 8 - results[0] = (realT) {v[0].s0, v[1].s0, v[2].s0, v[3].s0, v[4].s0, v[5].s0, v[6].s0, v[7].s0}; - results[1] = (realT) {v[0].s1, v[1].s1, v[2].s1, v[3].s1, v[4].s1, v[5].s1, v[6].s1, v[7].s1}; - results[2] = (realT) {v[0].s2, v[1].s2, v[2].s2, v[3].s2, v[4].s2, v[5].s2, v[6].s2, v[7].s2}; - results[3] = (realT) {v[0].s3, v[1].s3, v[2].s3, v[3].s3, v[4].s3, v[5].s3, v[6].s3, v[7].s3}; - results[4] = (realT) {v[0].s4, v[1].s4, v[2].s4, v[3].s4, v[4].s4, v[5].s4, v[6].s4, v[7].s4}; - results[5] = (realT) {v[0].s5, v[1].s5, v[2].s5, v[3].s5, v[4].s5, v[5].s5, v[6].s5, v[7].s5}; - results[6] = (realT) {v[0].s6, v[1].s6, v[2].s6, v[3].s6, v[4].s6, v[5].s6, v[6].s6, v[7].s6}; - results[7] = (realT) {v[0].s7, v[1].s7, v[2].s7, v[3].s7, v[4].s7, v[5].s7, v[6].s7, v[7].s7}; + results[0].s0 = v[0].s0; results[0].s1 = v[1].s0; results[0].s2 = v[2].s0; results[0].s3 = v[3].s0; results[0].s4 = v[4].s0; results[0].s5 = v[5].s0; results[0].s6 = v[6].s0; results[0].s7 = v[7].s0; + results[1].s0 = v[0].s1; results[1].s1 = v[1].s1; results[1].s2 = v[2].s1; results[1].s3 = v[3].s1; results[1].s4 = v[4].s1; results[1].s5 = v[5].s1; results[1].s6 = v[6].s1; results[1].s7 = v[7].s1; + results[2].s0 = v[0].s2; results[2].s1 = v[1].s2; results[2].s2 = v[2].s2; results[2].s3 = v[3].s2; results[2].s4 = v[4].s2; results[2].s5 = v[5].s2; results[2].s6 = v[6].s2; results[2].s7 = v[7].s2; + results[3].s0 = v[0].s3; results[3].s1 = v[1].s3; results[3].s2 = v[2].s3; results[3].s3 = v[3].s3; results[3].s4 = v[4].s3; results[3].s5 = v[5].s3; results[3].s6 = v[6].s3; results[3].s7 = v[7].s3; + results[4].s0 = v[0].s4; results[4].s1 = v[1].s4; results[4].s2 = v[2].s4; results[4].s3 = v[3].s4; results[4].s4 = v[4].s4; results[4].s5 = v[5].s4; results[4].s6 = v[6].s4; results[4].s7 = v[7].s4; + results[5].s0 = v[0].s5; results[5].s1 = v[1].s5; results[5].s2 = v[2].s5; results[5].s3 = v[3].s5; results[5].s4 = v[4].s5; results[5].s5 = v[5].s5; results[5].s6 = v[6].s5; results[5].s7 = v[7].s5; + results[6].s0 = v[0].s6; results[6].s1 = v[1].s6; results[6].s2 = v[2].s6; results[6].s3 = v[3].s6; results[6].s4 = v[4].s6; results[6].s5 = v[5].s6; results[6].s6 = v[6].s6; results[6].s7 = v[7].s6; + results[7].s0 = v[0].s7; results[7].s1 = v[1].s7; results[7].s2 = v[2].s7; results[7].s3 = v[3].s7; results[7].s4 = v[4].s7; results[7].s5 = v[5].s7; results[7].s6 = v[6].s7; results[7].s7 = v[7].s7; #elif TRA_WPT == 16 - results[ 0] = (realT) {v[0].s0, v[1].s0, v[2].s0, v[3].s0, v[4].s0, v[5].s0, v[6].s0, v[7].s0, v[8].s0, v[9].s0, v[10].s0, v[11].s0, v[12].s0, v[13].s0, v[14].s0, v[15].s0}; - results[ 1] = (realT) {v[0].s1, v[1].s1, v[2].s1, v[3].s1, v[4].s1, v[5].s1, v[6].s1, v[7].s1, v[8].s1, v[9].s1, v[10].s1, v[11].s1, v[12].s1, v[13].s1, v[14].s1, v[15].s1}; - results[ 2] = (realT) {v[0].s2, v[1].s2, v[2].s2, v[3].s2, v[4].s2, v[5].s2, v[6].s2, v[7].s2, v[8].s2, v[9].s2, v[10].s2, v[11].s2, v[12].s2, v[13].s2, v[14].s2, 
v[15].s2}; - results[ 3] = (realT) {v[0].s3, v[1].s3, v[2].s3, v[3].s3, v[4].s3, v[5].s3, v[6].s3, v[7].s3, v[8].s3, v[9].s3, v[10].s3, v[11].s3, v[12].s3, v[13].s3, v[14].s3, v[15].s3}; - results[ 4] = (realT) {v[0].s4, v[1].s4, v[2].s4, v[3].s4, v[4].s4, v[5].s4, v[6].s4, v[7].s4, v[8].s4, v[9].s4, v[10].s4, v[11].s4, v[12].s4, v[13].s4, v[14].s4, v[15].s4}; - results[ 5] = (realT) {v[0].s5, v[1].s5, v[2].s5, v[3].s5, v[4].s5, v[5].s5, v[6].s5, v[7].s5, v[8].s5, v[9].s5, v[10].s5, v[11].s5, v[12].s5, v[13].s5, v[14].s5, v[15].s5}; - results[ 6] = (realT) {v[0].s6, v[1].s6, v[2].s6, v[3].s6, v[4].s6, v[5].s6, v[6].s6, v[7].s6, v[8].s6, v[9].s6, v[10].s6, v[11].s6, v[12].s6, v[13].s6, v[14].s6, v[15].s6}; - results[ 7] = (realT) {v[0].s7, v[1].s7, v[2].s7, v[3].s7, v[4].s7, v[5].s7, v[6].s7, v[7].s7, v[8].s7, v[9].s7, v[10].s7, v[11].s7, v[12].s7, v[13].s7, v[14].s7, v[15].s7}; - results[ 8] = (realT) {v[0].s8, v[1].s8, v[2].s8, v[3].s8, v[4].s8, v[5].s8, v[6].s8, v[7].s8, v[8].s8, v[9].s8, v[10].s8, v[11].s8, v[12].s8, v[13].s8, v[14].s8, v[15].s8}; - results[ 9] = (realT) {v[0].s9, v[1].s9, v[2].s9, v[3].s9, v[4].s9, v[5].s9, v[6].s9, v[7].s9, v[8].s9, v[9].s9, v[10].s9, v[11].s9, v[12].s9, v[13].s9, v[14].s9, v[15].s9}; - results[10] = (realT) {v[0].sA, v[1].sA, v[2].sA, v[3].sA, v[4].sA, v[5].sA, v[6].sA, v[7].sA, v[8].sA, v[9].sA, v[10].sA, v[11].sA, v[12].sA, v[13].sA, v[14].sA, v[15].sA}; - results[11] = (realT) {v[0].sB, v[1].sB, v[2].sB, v[3].sB, v[4].sB, v[5].sB, v[6].sB, v[7].sB, v[8].sB, v[9].sB, v[10].sB, v[11].sB, v[12].sB, v[13].sB, v[14].sB, v[15].sB}; - results[12] = (realT) {v[0].sC, v[1].sC, v[2].sC, v[3].sC, v[4].sC, v[5].sC, v[6].sC, v[7].sC, v[8].sC, v[9].sC, v[10].sC, v[11].sC, v[12].sC, v[13].sC, v[14].sC, v[15].sC}; - results[13] = (realT) {v[0].sD, v[1].sD, v[2].sD, v[3].sD, v[4].sD, v[5].sD, v[6].sD, v[7].sD, v[8].sD, v[9].sD, v[10].sD, v[11].sD, v[12].sD, v[13].sD, v[14].sD, v[15].sD}; - results[14] = (realT) {v[0].sE, v[1].sE, v[2].sE, v[3].sE, v[4].sE, v[5].sE, v[6].sE, v[7].sE, v[8].sE, v[9].sE, v[10].sE, v[11].sE, v[12].sE, v[13].sE, v[14].sE, v[15].sE}; - results[15] = (realT) {v[0].sF, v[1].sF, v[2].sF, v[3].sF, v[4].sF, v[5].sF, v[6].sF, v[7].sF, v[8].sF, v[9].sF, v[10].sF, v[11].sF, v[12].sF, v[13].sF, v[14].sF, v[15].sF}; + results[ 0].s0 = v[0].s0; results[ 0].s1 = v[1].s0; results[ 0].s2 = v[2].s0; results[ 0].s3 = v[3].s0; results[ 0].s4 = v[4].s0; results[ 0].s5 = v[5].s0; results[ 0].s6 = v[6].s0; results[ 0].s7 = v[7].s0; results[ 0].s8 = v[8].s0; results[ 0].s9 = v[9].s0; results[ 0].sA = v[10].s0;, results[ 0].sB = v[11].s0;, results[ 0].sC = v[12].s0;, results[ 0].sD = v[13].s0;, results[ 0].sE = v[14].s0;, results[ 0].sF = v[15].s0; + results[ 1].s0 = v[0].s1; results[ 1].s1 = v[1].s1; results[ 1].s2 = v[2].s1; results[ 1].s3 = v[3].s1; results[ 1].s4 = v[4].s1; results[ 1].s5 = v[5].s1; results[ 1].s6 = v[6].s1; results[ 1].s7 = v[7].s1; results[ 1].s8 = v[8].s1; results[ 1].s9 = v[9].s1; results[ 1].sA = v[10].s1;, results[ 1].sB = v[11].s1;, results[ 1].sC = v[12].s1;, results[ 1].sD = v[13].s1;, results[ 1].sE = v[14].s1;, results[ 1].sF = v[15].s1; + results[ 2].s0 = v[0].s2; results[ 2].s1 = v[1].s2; results[ 2].s2 = v[2].s2; results[ 2].s3 = v[3].s2; results[ 2].s4 = v[4].s2; results[ 2].s5 = v[5].s2; results[ 2].s6 = v[6].s2; results[ 2].s7 = v[7].s2; results[ 2].s8 = v[8].s2; results[ 2].s9 = v[9].s2; results[ 2].sA = v[10].s2;, results[ 2].sB = v[11].s2;, results[ 2].sC = v[12].s2;, results[ 2].sD = v[13].s2;, results[ 2].sE = 
v[14].s2;, results[ 2].sF = v[15].s2; + results[ 3].s0 = v[0].s3; results[ 3].s1 = v[1].s3; results[ 3].s2 = v[2].s3; results[ 3].s3 = v[3].s3; results[ 3].s4 = v[4].s3; results[ 3].s5 = v[5].s3; results[ 3].s6 = v[6].s3; results[ 3].s7 = v[7].s3; results[ 3].s8 = v[8].s3; results[ 3].s9 = v[9].s3; results[ 3].sA = v[10].s3;, results[ 3].sB = v[11].s3;, results[ 3].sC = v[12].s3;, results[ 3].sD = v[13].s3;, results[ 3].sE = v[14].s3;, results[ 3].sF = v[15].s3; + results[ 4].s0 = v[0].s4; results[ 4].s1 = v[1].s4; results[ 4].s2 = v[2].s4; results[ 4].s3 = v[3].s4; results[ 4].s4 = v[4].s4; results[ 4].s5 = v[5].s4; results[ 4].s6 = v[6].s4; results[ 4].s7 = v[7].s4; results[ 4].s8 = v[8].s4; results[ 4].s9 = v[9].s4; results[ 4].sA = v[10].s4;, results[ 4].sB = v[11].s4;, results[ 4].sC = v[12].s4;, results[ 4].sD = v[13].s4;, results[ 4].sE = v[14].s4;, results[ 4].sF = v[15].s4; + results[ 5].s0 = v[0].s5; results[ 5].s1 = v[1].s5; results[ 5].s2 = v[2].s5; results[ 5].s3 = v[3].s5; results[ 5].s4 = v[4].s5; results[ 5].s5 = v[5].s5; results[ 5].s6 = v[6].s5; results[ 5].s7 = v[7].s5; results[ 5].s8 = v[8].s5; results[ 5].s9 = v[9].s5; results[ 5].sA = v[10].s5;, results[ 5].sB = v[11].s5;, results[ 5].sC = v[12].s5;, results[ 5].sD = v[13].s5;, results[ 5].sE = v[14].s5;, results[ 5].sF = v[15].s5; + results[ 6].s0 = v[0].s6; results[ 6].s1 = v[1].s6; results[ 6].s2 = v[2].s6; results[ 6].s3 = v[3].s6; results[ 6].s4 = v[4].s6; results[ 6].s5 = v[5].s6; results[ 6].s6 = v[6].s6; results[ 6].s7 = v[7].s6; results[ 6].s8 = v[8].s6; results[ 6].s9 = v[9].s6; results[ 6].sA = v[10].s6;, results[ 6].sB = v[11].s6;, results[ 6].sC = v[12].s6;, results[ 6].sD = v[13].s6;, results[ 6].sE = v[14].s6;, results[ 6].sF = v[15].s6; + results[ 7].s0 = v[0].s7; results[ 7].s1 = v[1].s7; results[ 7].s2 = v[2].s7; results[ 7].s3 = v[3].s7; results[ 7].s4 = v[4].s7; results[ 7].s5 = v[5].s7; results[ 7].s6 = v[6].s7; results[ 7].s7 = v[7].s7; results[ 7].s8 = v[8].s7; results[ 7].s9 = v[9].s7; results[ 7].sA = v[10].s7;, results[ 7].sB = v[11].s7;, results[ 7].sC = v[12].s7;, results[ 7].sD = v[13].s7;, results[ 7].sE = v[14].s7;, results[ 7].sF = v[15].s7; + results[ 8].s0 = v[0].s8; results[ 8].s1 = v[1].s8; results[ 8].s2 = v[2].s8; results[ 8].s3 = v[3].s8; results[ 8].s4 = v[4].s8; results[ 8].s5 = v[5].s8; results[ 8].s6 = v[6].s8; results[ 8].s7 = v[7].s8; results[ 8].s8 = v[8].s8; results[ 8].s9 = v[9].s8; results[ 8].sA = v[10].s8;, results[ 8].sB = v[11].s8;, results[ 8].sC = v[12].s8;, results[ 8].sD = v[13].s8;, results[ 8].sE = v[14].s8;, results[ 8].sF = v[15].s8; + results[ 9].s0 = v[0].s9; results[ 9].s1 = v[1].s9; results[ 9].s2 = v[2].s9; results[ 9].s3 = v[3].s9; results[ 9].s4 = v[4].s9; results[ 9].s5 = v[5].s9; results[ 9].s6 = v[6].s9; results[ 9].s7 = v[7].s9; results[ 9].s8 = v[8].s9; results[ 9].s9 = v[9].s9; results[ 9].sA = v[10].s9;, results[ 9].sB = v[11].s9;, results[ 9].sC = v[12].s9;, results[ 9].sD = v[13].s9;, results[ 9].sE = v[14].s9;, results[ 9].sF = v[15].s9; + results[10].s0 = v[0].sA; results[10].s1 = v[1].sA; results[10].s2 = v[2].sA; results[10].s3 = v[3].sA; results[10].s4 = v[4].sA; results[10].s5 = v[5].sA; results[10].s6 = v[6].sA; results[10].s7 = v[7].sA; results[10].s8 = v[8].sA; results[10].s9 = v[9].sA; results[10].sA = v[10].sA;, results[10].sB = v[11].sA;, results[10].sC = v[12].sA;, results[10].sD = v[13].sA;, results[10].sE = v[14].sA;, results[10].sF = v[15].sA; + results[11].s0 = v[0].sB; results[11].s1 = v[1].sB; results[11].s2 = v[2].sB; 
results[11].s3 = v[3].sB; results[11].s4 = v[4].sB; results[11].s5 = v[5].sB; results[11].s6 = v[6].sB; results[11].s7 = v[7].sB; results[11].s8 = v[8].sB; results[11].s9 = v[9].sB; results[11].sA = v[10].sB;, results[11].sB = v[11].sB;, results[11].sC = v[12].sB;, results[11].sD = v[13].sB;, results[11].sE = v[14].sB;, results[11].sF = v[15].sB; + results[12].s0 = v[0].sC; results[12].s1 = v[1].sC; results[12].s2 = v[2].sC; results[12].s3 = v[3].sC; results[12].s4 = v[4].sC; results[12].s5 = v[5].sC; results[12].s6 = v[6].sC; results[12].s7 = v[7].sC; results[12].s8 = v[8].sC; results[12].s9 = v[9].sC; results[12].sA = v[10].sC;, results[12].sB = v[11].sC;, results[12].sC = v[12].sC;, results[12].sD = v[13].sC;, results[12].sE = v[14].sC;, results[12].sF = v[15].sC; + results[13].s0 = v[0].sD; results[13].s1 = v[1].sD; results[13].s2 = v[2].sD; results[13].s3 = v[3].sD; results[13].s4 = v[4].sD; results[13].s5 = v[5].sD; results[13].s6 = v[6].sD; results[13].s7 = v[7].sD; results[13].s8 = v[8].sD; results[13].s9 = v[9].sD; results[13].sA = v[10].sD;, results[13].sB = v[11].sD;, results[13].sC = v[12].sD;, results[13].sD = v[13].sD;, results[13].sE = v[14].sD;, results[13].sF = v[15].sD; + results[14].s0 = v[0].sE; results[14].s1 = v[1].sE; results[14].s2 = v[2].sE; results[14].s3 = v[3].sE; results[14].s4 = v[4].sE; results[14].s5 = v[5].sE; results[14].s6 = v[6].sE; results[14].s7 = v[7].sE; results[14].s8 = v[8].sE; results[14].s9 = v[9].sE; results[14].sA = v[10].sE;, results[14].sB = v[11].sE;, results[14].sC = v[12].sE;, results[14].sD = v[13].sE;, results[14].sE = v[14].sE;, results[14].sF = v[15].sE; + results[15].s0 = v[0].sF; results[15].s1 = v[1].sF; results[15].s2 = v[2].sF; results[15].s3 = v[3].sF; results[15].s4 = v[4].sF; results[15].s5 = v[5].sF; results[15].s6 = v[6].sF; results[15].s7 = v[7].sF; results[15].s8 = v[8].sF; results[15].s9 = v[9].sF; results[15].sA = v[10].sF;, results[15].sB = v[11].sF;, results[15].sC = v[12].sF;, results[15].sD = v[13].sF;, results[15].sE = v[14].sF;, results[15].sF = v[15].sF; #endif // Multiplies by alpha and then stores the results into the destination matrix -- cgit v1.2.3 From b06bc01da90983ce484fded4e1a87f5fcd5c4eca Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sat, 14 Oct 2017 17:13:54 +0200 Subject: Make local memory pointers a define in OpenCL; some fixes to the recently changed transpose kernel code --- src/kernels/common.opencl | 10 ++++-- src/kernels/level3/invert_diagonal_blocks.opencl | 6 ++-- src/kernels/level3/transpose_fast.opencl | 40 ++++++++++++------------ src/kernels/level3/transpose_pad.opencl | 4 +-- src/kernels/level3/xgemm_direct_part1.opencl | 4 +-- src/kernels/level3/xgemm_direct_part2.opencl | 12 +++---- src/kernels/level3/xgemm_direct_part3.opencl | 2 +- src/kernels/level3/xgemm_part1.opencl | 8 ++--- src/kernels/level3/xgemm_part3.opencl | 6 ++-- src/kernels/opencl_to_cuda.h | 1 + 10 files changed, 49 insertions(+), 44 deletions(-) diff --git a/src/kernels/common.opencl b/src/kernels/common.opencl index a34877d9..01c411bc 100644 --- a/src/kernels/common.opencl +++ b/src/kernels/common.opencl @@ -23,8 +23,8 @@ R"( #endif // ================================================================================================= -#ifndef CUDA +#ifndef CUDA // Enable support for double-precision #if PRECISION == 16 #pragma OPENCL EXTENSION cl_khr_fp16: enable @@ -34,7 +34,6 @@ R"( #if PRECISION == 64 || PRECISION == 6464 #pragma OPENCL EXTENSION cl_khr_fp64: enable #endif - #endif // Half-precision @@ -120,10 
+119,15 @@ R"( #define GetRealArg(x) x #endif +// Pointers to local memory objects (using a define because CUDA doesn't need them) +#ifndef LOCAL_PTR + #define LOCAL_PTR __local +#endif + // ================================================================================================= // Don't use the non-IEEE754 compliant OpenCL built-in mad() instruction per default. For specific -// devices, this is enabled (see src/routine.cc). +// devices, this is enabled (see src/routine.cpp). #ifndef USE_CL_MAD #define USE_CL_MAD 0 #endif diff --git a/src/kernels/level3/invert_diagonal_blocks.opencl b/src/kernels/level3/invert_diagonal_blocks.opencl index 93241700..281fdcff 100644 --- a/src/kernels/level3/invert_diagonal_blocks.opencl +++ b/src/kernels/level3/invert_diagonal_blocks.opencl @@ -164,7 +164,7 @@ void InvertDiagonalBlock(int n, __global const real* restrict src, const int src // ================================================================================================= // Triple matrix-multiplication kernel: C = A * B -INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part, __local real* blm, int n, +INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part, LOCAL_PTR real* blm, int n, __global const real* agm, __global const real* bgm, __global real* cgm, const int lda, const int ldb, const int ldc, int current_size, int num_pages, const int block_size) { @@ -250,7 +250,7 @@ INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part, // ================================================================================================= // Triple matrix-multiplication kernel part 1: B12 = A12 * B22 (upper) or B21 = A21 * B11 (lower) -INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, __local real* blm, int n, +INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, LOCAL_PTR real* blm, int n, __global const real* src, const int a_offset, const int lda, __global real* dest, int current_size, int num_pages, const int block_size) { @@ -286,7 +286,7 @@ INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, __local rea } // Triple matrix-multiplication kernel part 1: B12 = -B11 * B12 (upper) or B21 = -B22 * B21 (lower) -INLINE_FUNC void TripleMatMulPart2(const int size, const bool upper, __local real* blm, const int n, +INLINE_FUNC void TripleMatMulPart2(const int size, const bool upper, LOCAL_PTR real* blm, const int n, __global real* dest, int current_size, int num_pages, const int block_size) { // Emulates a 3D grid: NX * (NY * num_pages) diff --git a/src/kernels/level3/transpose_fast.opencl b/src/kernels/level3/transpose_fast.opencl index 5f9ba209..37b25d99 100644 --- a/src/kernels/level3/transpose_fast.opencl +++ b/src/kernels/level3/transpose_fast.opencl @@ -87,10 +87,10 @@ void TransposeMatrixFast(const int ld, results[0].x = v[0].x; results[0].y = v[1].x; results[1].x = v[0].y; results[1].y = v[1].y; #elif TRA_WPT == 4 - results[0].x = v[0].x; results[0].y; = v[1].x; results[0].z = v[2].x; results[0].w = v[3].x; - results[1].x = v[0].y; results[1].y; = v[1].y; results[1].z = v[2].y; results[1].w = v[3].y; - results[2].x = v[0].z; results[2].y; = v[1].z; results[2].z = v[2].z; results[2].w = v[3].z; - results[3].x = v[0].w; results[3].y; = v[1].w; results[3].z = v[2].w; results[3].w = v[3].w; + results[0].x = v[0].x; results[0].y = v[1].x; results[0].z = v[2].x; results[0].w = v[3].x; + results[1].x = v[0].y; results[1].y = v[1].y; results[1].z = v[2].y; results[1].w = 
v[3].y; + results[2].x = v[0].z; results[2].y = v[1].z; results[2].z = v[2].z; results[2].w = v[3].z; + results[3].x = v[0].w; results[3].y = v[1].w; results[3].z = v[2].w; results[3].w = v[3].w; #elif TRA_WPT == 8 results[0].s0 = v[0].s0; results[0].s1 = v[1].s0; results[0].s2 = v[2].s0; results[0].s3 = v[3].s0; results[0].s4 = v[4].s0; results[0].s5 = v[5].s0; results[0].s6 = v[6].s0; results[0].s7 = v[7].s0; results[1].s0 = v[0].s1; results[1].s1 = v[1].s1; results[1].s2 = v[2].s1; results[1].s3 = v[3].s1; results[1].s4 = v[4].s1; results[1].s5 = v[5].s1; results[1].s6 = v[6].s1; results[1].s7 = v[7].s1; @@ -101,22 +101,22 @@ void TransposeMatrixFast(const int ld, results[6].s0 = v[0].s6; results[6].s1 = v[1].s6; results[6].s2 = v[2].s6; results[6].s3 = v[3].s6; results[6].s4 = v[4].s6; results[6].s5 = v[5].s6; results[6].s6 = v[6].s6; results[6].s7 = v[7].s6; results[7].s0 = v[0].s7; results[7].s1 = v[1].s7; results[7].s2 = v[2].s7; results[7].s3 = v[3].s7; results[7].s4 = v[4].s7; results[7].s5 = v[5].s7; results[7].s6 = v[6].s7; results[7].s7 = v[7].s7; #elif TRA_WPT == 16 - results[ 0].s0 = v[0].s0; results[ 0].s1 = v[1].s0; results[ 0].s2 = v[2].s0; results[ 0].s3 = v[3].s0; results[ 0].s4 = v[4].s0; results[ 0].s5 = v[5].s0; results[ 0].s6 = v[6].s0; results[ 0].s7 = v[7].s0; results[ 0].s8 = v[8].s0; results[ 0].s9 = v[9].s0; results[ 0].sA = v[10].s0;, results[ 0].sB = v[11].s0;, results[ 0].sC = v[12].s0;, results[ 0].sD = v[13].s0;, results[ 0].sE = v[14].s0;, results[ 0].sF = v[15].s0; - results[ 1].s0 = v[0].s1; results[ 1].s1 = v[1].s1; results[ 1].s2 = v[2].s1; results[ 1].s3 = v[3].s1; results[ 1].s4 = v[4].s1; results[ 1].s5 = v[5].s1; results[ 1].s6 = v[6].s1; results[ 1].s7 = v[7].s1; results[ 1].s8 = v[8].s1; results[ 1].s9 = v[9].s1; results[ 1].sA = v[10].s1;, results[ 1].sB = v[11].s1;, results[ 1].sC = v[12].s1;, results[ 1].sD = v[13].s1;, results[ 1].sE = v[14].s1;, results[ 1].sF = v[15].s1; - results[ 2].s0 = v[0].s2; results[ 2].s1 = v[1].s2; results[ 2].s2 = v[2].s2; results[ 2].s3 = v[3].s2; results[ 2].s4 = v[4].s2; results[ 2].s5 = v[5].s2; results[ 2].s6 = v[6].s2; results[ 2].s7 = v[7].s2; results[ 2].s8 = v[8].s2; results[ 2].s9 = v[9].s2; results[ 2].sA = v[10].s2;, results[ 2].sB = v[11].s2;, results[ 2].sC = v[12].s2;, results[ 2].sD = v[13].s2;, results[ 2].sE = v[14].s2;, results[ 2].sF = v[15].s2; - results[ 3].s0 = v[0].s3; results[ 3].s1 = v[1].s3; results[ 3].s2 = v[2].s3; results[ 3].s3 = v[3].s3; results[ 3].s4 = v[4].s3; results[ 3].s5 = v[5].s3; results[ 3].s6 = v[6].s3; results[ 3].s7 = v[7].s3; results[ 3].s8 = v[8].s3; results[ 3].s9 = v[9].s3; results[ 3].sA = v[10].s3;, results[ 3].sB = v[11].s3;, results[ 3].sC = v[12].s3;, results[ 3].sD = v[13].s3;, results[ 3].sE = v[14].s3;, results[ 3].sF = v[15].s3; - results[ 4].s0 = v[0].s4; results[ 4].s1 = v[1].s4; results[ 4].s2 = v[2].s4; results[ 4].s3 = v[3].s4; results[ 4].s4 = v[4].s4; results[ 4].s5 = v[5].s4; results[ 4].s6 = v[6].s4; results[ 4].s7 = v[7].s4; results[ 4].s8 = v[8].s4; results[ 4].s9 = v[9].s4; results[ 4].sA = v[10].s4;, results[ 4].sB = v[11].s4;, results[ 4].sC = v[12].s4;, results[ 4].sD = v[13].s4;, results[ 4].sE = v[14].s4;, results[ 4].sF = v[15].s4; - results[ 5].s0 = v[0].s5; results[ 5].s1 = v[1].s5; results[ 5].s2 = v[2].s5; results[ 5].s3 = v[3].s5; results[ 5].s4 = v[4].s5; results[ 5].s5 = v[5].s5; results[ 5].s6 = v[6].s5; results[ 5].s7 = v[7].s5; results[ 5].s8 = v[8].s5; results[ 5].s9 = v[9].s5; results[ 5].sA = v[10].s5;, results[ 5].sB = 
v[11].s5;, results[ 5].sC = v[12].s5;, results[ 5].sD = v[13].s5;, results[ 5].sE = v[14].s5;, results[ 5].sF = v[15].s5; - results[ 6].s0 = v[0].s6; results[ 6].s1 = v[1].s6; results[ 6].s2 = v[2].s6; results[ 6].s3 = v[3].s6; results[ 6].s4 = v[4].s6; results[ 6].s5 = v[5].s6; results[ 6].s6 = v[6].s6; results[ 6].s7 = v[7].s6; results[ 6].s8 = v[8].s6; results[ 6].s9 = v[9].s6; results[ 6].sA = v[10].s6;, results[ 6].sB = v[11].s6;, results[ 6].sC = v[12].s6;, results[ 6].sD = v[13].s6;, results[ 6].sE = v[14].s6;, results[ 6].sF = v[15].s6; - results[ 7].s0 = v[0].s7; results[ 7].s1 = v[1].s7; results[ 7].s2 = v[2].s7; results[ 7].s3 = v[3].s7; results[ 7].s4 = v[4].s7; results[ 7].s5 = v[5].s7; results[ 7].s6 = v[6].s7; results[ 7].s7 = v[7].s7; results[ 7].s8 = v[8].s7; results[ 7].s9 = v[9].s7; results[ 7].sA = v[10].s7;, results[ 7].sB = v[11].s7;, results[ 7].sC = v[12].s7;, results[ 7].sD = v[13].s7;, results[ 7].sE = v[14].s7;, results[ 7].sF = v[15].s7; - results[ 8].s0 = v[0].s8; results[ 8].s1 = v[1].s8; results[ 8].s2 = v[2].s8; results[ 8].s3 = v[3].s8; results[ 8].s4 = v[4].s8; results[ 8].s5 = v[5].s8; results[ 8].s6 = v[6].s8; results[ 8].s7 = v[7].s8; results[ 8].s8 = v[8].s8; results[ 8].s9 = v[9].s8; results[ 8].sA = v[10].s8;, results[ 8].sB = v[11].s8;, results[ 8].sC = v[12].s8;, results[ 8].sD = v[13].s8;, results[ 8].sE = v[14].s8;, results[ 8].sF = v[15].s8; - results[ 9].s0 = v[0].s9; results[ 9].s1 = v[1].s9; results[ 9].s2 = v[2].s9; results[ 9].s3 = v[3].s9; results[ 9].s4 = v[4].s9; results[ 9].s5 = v[5].s9; results[ 9].s6 = v[6].s9; results[ 9].s7 = v[7].s9; results[ 9].s8 = v[8].s9; results[ 9].s9 = v[9].s9; results[ 9].sA = v[10].s9;, results[ 9].sB = v[11].s9;, results[ 9].sC = v[12].s9;, results[ 9].sD = v[13].s9;, results[ 9].sE = v[14].s9;, results[ 9].sF = v[15].s9; - results[10].s0 = v[0].sA; results[10].s1 = v[1].sA; results[10].s2 = v[2].sA; results[10].s3 = v[3].sA; results[10].s4 = v[4].sA; results[10].s5 = v[5].sA; results[10].s6 = v[6].sA; results[10].s7 = v[7].sA; results[10].s8 = v[8].sA; results[10].s9 = v[9].sA; results[10].sA = v[10].sA;, results[10].sB = v[11].sA;, results[10].sC = v[12].sA;, results[10].sD = v[13].sA;, results[10].sE = v[14].sA;, results[10].sF = v[15].sA; - results[11].s0 = v[0].sB; results[11].s1 = v[1].sB; results[11].s2 = v[2].sB; results[11].s3 = v[3].sB; results[11].s4 = v[4].sB; results[11].s5 = v[5].sB; results[11].s6 = v[6].sB; results[11].s7 = v[7].sB; results[11].s8 = v[8].sB; results[11].s9 = v[9].sB; results[11].sA = v[10].sB;, results[11].sB = v[11].sB;, results[11].sC = v[12].sB;, results[11].sD = v[13].sB;, results[11].sE = v[14].sB;, results[11].sF = v[15].sB; - results[12].s0 = v[0].sC; results[12].s1 = v[1].sC; results[12].s2 = v[2].sC; results[12].s3 = v[3].sC; results[12].s4 = v[4].sC; results[12].s5 = v[5].sC; results[12].s6 = v[6].sC; results[12].s7 = v[7].sC; results[12].s8 = v[8].sC; results[12].s9 = v[9].sC; results[12].sA = v[10].sC;, results[12].sB = v[11].sC;, results[12].sC = v[12].sC;, results[12].sD = v[13].sC;, results[12].sE = v[14].sC;, results[12].sF = v[15].sC; - results[13].s0 = v[0].sD; results[13].s1 = v[1].sD; results[13].s2 = v[2].sD; results[13].s3 = v[3].sD; results[13].s4 = v[4].sD; results[13].s5 = v[5].sD; results[13].s6 = v[6].sD; results[13].s7 = v[7].sD; results[13].s8 = v[8].sD; results[13].s9 = v[9].sD; results[13].sA = v[10].sD;, results[13].sB = v[11].sD;, results[13].sC = v[12].sD;, results[13].sD = v[13].sD;, results[13].sE = v[14].sD;, results[13].sF = v[15].sD; 
- results[14].s0 = v[0].sE; results[14].s1 = v[1].sE; results[14].s2 = v[2].sE; results[14].s3 = v[3].sE; results[14].s4 = v[4].sE; results[14].s5 = v[5].sE; results[14].s6 = v[6].sE; results[14].s7 = v[7].sE; results[14].s8 = v[8].sE; results[14].s9 = v[9].sE; results[14].sA = v[10].sE;, results[14].sB = v[11].sE;, results[14].sC = v[12].sE;, results[14].sD = v[13].sE;, results[14].sE = v[14].sE;, results[14].sF = v[15].sE; - results[15].s0 = v[0].sF; results[15].s1 = v[1].sF; results[15].s2 = v[2].sF; results[15].s3 = v[3].sF; results[15].s4 = v[4].sF; results[15].s5 = v[5].sF; results[15].s6 = v[6].sF; results[15].s7 = v[7].sF; results[15].s8 = v[8].sF; results[15].s9 = v[9].sF; results[15].sA = v[10].sF;, results[15].sB = v[11].sF;, results[15].sC = v[12].sF;, results[15].sD = v[13].sF;, results[15].sE = v[14].sF;, results[15].sF = v[15].sF; + results[ 0].s0 = v[0].s0; results[ 0].s1 = v[1].s0; results[ 0].s2 = v[2].s0; results[ 0].s3 = v[3].s0; results[ 0].s4 = v[4].s0; results[ 0].s5 = v[5].s0; results[ 0].s6 = v[6].s0; results[ 0].s7 = v[7].s0; results[ 0].s8 = v[8].s0; results[ 0].s9 = v[9].s0; results[ 0].sA = v[10].s0; results[ 0].sB = v[11].s0; results[ 0].sC = v[12].s0; results[ 0].sD = v[13].s0; results[ 0].sE = v[14].s0; results[ 0].sF = v[15].s0; + results[ 1].s0 = v[0].s1; results[ 1].s1 = v[1].s1; results[ 1].s2 = v[2].s1; results[ 1].s3 = v[3].s1; results[ 1].s4 = v[4].s1; results[ 1].s5 = v[5].s1; results[ 1].s6 = v[6].s1; results[ 1].s7 = v[7].s1; results[ 1].s8 = v[8].s1; results[ 1].s9 = v[9].s1; results[ 1].sA = v[10].s1; results[ 1].sB = v[11].s1; results[ 1].sC = v[12].s1; results[ 1].sD = v[13].s1; results[ 1].sE = v[14].s1; results[ 1].sF = v[15].s1; + results[ 2].s0 = v[0].s2; results[ 2].s1 = v[1].s2; results[ 2].s2 = v[2].s2; results[ 2].s3 = v[3].s2; results[ 2].s4 = v[4].s2; results[ 2].s5 = v[5].s2; results[ 2].s6 = v[6].s2; results[ 2].s7 = v[7].s2; results[ 2].s8 = v[8].s2; results[ 2].s9 = v[9].s2; results[ 2].sA = v[10].s2; results[ 2].sB = v[11].s2; results[ 2].sC = v[12].s2; results[ 2].sD = v[13].s2; results[ 2].sE = v[14].s2; results[ 2].sF = v[15].s2; + results[ 3].s0 = v[0].s3; results[ 3].s1 = v[1].s3; results[ 3].s2 = v[2].s3; results[ 3].s3 = v[3].s3; results[ 3].s4 = v[4].s3; results[ 3].s5 = v[5].s3; results[ 3].s6 = v[6].s3; results[ 3].s7 = v[7].s3; results[ 3].s8 = v[8].s3; results[ 3].s9 = v[9].s3; results[ 3].sA = v[10].s3; results[ 3].sB = v[11].s3; results[ 3].sC = v[12].s3; results[ 3].sD = v[13].s3; results[ 3].sE = v[14].s3; results[ 3].sF = v[15].s3; + results[ 4].s0 = v[0].s4; results[ 4].s1 = v[1].s4; results[ 4].s2 = v[2].s4; results[ 4].s3 = v[3].s4; results[ 4].s4 = v[4].s4; results[ 4].s5 = v[5].s4; results[ 4].s6 = v[6].s4; results[ 4].s7 = v[7].s4; results[ 4].s8 = v[8].s4; results[ 4].s9 = v[9].s4; results[ 4].sA = v[10].s4; results[ 4].sB = v[11].s4; results[ 4].sC = v[12].s4; results[ 4].sD = v[13].s4; results[ 4].sE = v[14].s4; results[ 4].sF = v[15].s4; + results[ 5].s0 = v[0].s5; results[ 5].s1 = v[1].s5; results[ 5].s2 = v[2].s5; results[ 5].s3 = v[3].s5; results[ 5].s4 = v[4].s5; results[ 5].s5 = v[5].s5; results[ 5].s6 = v[6].s5; results[ 5].s7 = v[7].s5; results[ 5].s8 = v[8].s5; results[ 5].s9 = v[9].s5; results[ 5].sA = v[10].s5; results[ 5].sB = v[11].s5; results[ 5].sC = v[12].s5; results[ 5].sD = v[13].s5; results[ 5].sE = v[14].s5; results[ 5].sF = v[15].s5; + results[ 6].s0 = v[0].s6; results[ 6].s1 = v[1].s6; results[ 6].s2 = v[2].s6; results[ 6].s3 = v[3].s6; results[ 6].s4 = v[4].s6; results[ 6].s5 = 
v[5].s6; results[ 6].s6 = v[6].s6; results[ 6].s7 = v[7].s6; results[ 6].s8 = v[8].s6; results[ 6].s9 = v[9].s6; results[ 6].sA = v[10].s6; results[ 6].sB = v[11].s6; results[ 6].sC = v[12].s6; results[ 6].sD = v[13].s6; results[ 6].sE = v[14].s6; results[ 6].sF = v[15].s6; + results[ 7].s0 = v[0].s7; results[ 7].s1 = v[1].s7; results[ 7].s2 = v[2].s7; results[ 7].s3 = v[3].s7; results[ 7].s4 = v[4].s7; results[ 7].s5 = v[5].s7; results[ 7].s6 = v[6].s7; results[ 7].s7 = v[7].s7; results[ 7].s8 = v[8].s7; results[ 7].s9 = v[9].s7; results[ 7].sA = v[10].s7; results[ 7].sB = v[11].s7; results[ 7].sC = v[12].s7; results[ 7].sD = v[13].s7; results[ 7].sE = v[14].s7; results[ 7].sF = v[15].s7; + results[ 8].s0 = v[0].s8; results[ 8].s1 = v[1].s8; results[ 8].s2 = v[2].s8; results[ 8].s3 = v[3].s8; results[ 8].s4 = v[4].s8; results[ 8].s5 = v[5].s8; results[ 8].s6 = v[6].s8; results[ 8].s7 = v[7].s8; results[ 8].s8 = v[8].s8; results[ 8].s9 = v[9].s8; results[ 8].sA = v[10].s8; results[ 8].sB = v[11].s8; results[ 8].sC = v[12].s8; results[ 8].sD = v[13].s8; results[ 8].sE = v[14].s8; results[ 8].sF = v[15].s8; + results[ 9].s0 = v[0].s9; results[ 9].s1 = v[1].s9; results[ 9].s2 = v[2].s9; results[ 9].s3 = v[3].s9; results[ 9].s4 = v[4].s9; results[ 9].s5 = v[5].s9; results[ 9].s6 = v[6].s9; results[ 9].s7 = v[7].s9; results[ 9].s8 = v[8].s9; results[ 9].s9 = v[9].s9; results[ 9].sA = v[10].s9; results[ 9].sB = v[11].s9; results[ 9].sC = v[12].s9; results[ 9].sD = v[13].s9; results[ 9].sE = v[14].s9; results[ 9].sF = v[15].s9; + results[10].s0 = v[0].sA; results[10].s1 = v[1].sA; results[10].s2 = v[2].sA; results[10].s3 = v[3].sA; results[10].s4 = v[4].sA; results[10].s5 = v[5].sA; results[10].s6 = v[6].sA; results[10].s7 = v[7].sA; results[10].s8 = v[8].sA; results[10].s9 = v[9].sA; results[10].sA = v[10].sA; results[10].sB = v[11].sA; results[10].sC = v[12].sA; results[10].sD = v[13].sA; results[10].sE = v[14].sA; results[10].sF = v[15].sA; + results[11].s0 = v[0].sB; results[11].s1 = v[1].sB; results[11].s2 = v[2].sB; results[11].s3 = v[3].sB; results[11].s4 = v[4].sB; results[11].s5 = v[5].sB; results[11].s6 = v[6].sB; results[11].s7 = v[7].sB; results[11].s8 = v[8].sB; results[11].s9 = v[9].sB; results[11].sA = v[10].sB; results[11].sB = v[11].sB; results[11].sC = v[12].sB; results[11].sD = v[13].sB; results[11].sE = v[14].sB; results[11].sF = v[15].sB; + results[12].s0 = v[0].sC; results[12].s1 = v[1].sC; results[12].s2 = v[2].sC; results[12].s3 = v[3].sC; results[12].s4 = v[4].sC; results[12].s5 = v[5].sC; results[12].s6 = v[6].sC; results[12].s7 = v[7].sC; results[12].s8 = v[8].sC; results[12].s9 = v[9].sC; results[12].sA = v[10].sC; results[12].sB = v[11].sC; results[12].sC = v[12].sC; results[12].sD = v[13].sC; results[12].sE = v[14].sC; results[12].sF = v[15].sC; + results[13].s0 = v[0].sD; results[13].s1 = v[1].sD; results[13].s2 = v[2].sD; results[13].s3 = v[3].sD; results[13].s4 = v[4].sD; results[13].s5 = v[5].sD; results[13].s6 = v[6].sD; results[13].s7 = v[7].sD; results[13].s8 = v[8].sD; results[13].s9 = v[9].sD; results[13].sA = v[10].sD; results[13].sB = v[11].sD; results[13].sC = v[12].sD; results[13].sD = v[13].sD; results[13].sE = v[14].sD; results[13].sF = v[15].sD; + results[14].s0 = v[0].sE; results[14].s1 = v[1].sE; results[14].s2 = v[2].sE; results[14].s3 = v[3].sE; results[14].s4 = v[4].sE; results[14].s5 = v[5].sE; results[14].s6 = v[6].sE; results[14].s7 = v[7].sE; results[14].s8 = v[8].sE; results[14].s9 = v[9].sE; results[14].sA = v[10].sE; results[14].sB = 
v[11].sE; results[14].sC = v[12].sE; results[14].sD = v[13].sE; results[14].sE = v[14].sE; results[14].sF = v[15].sE; + results[15].s0 = v[0].sF; results[15].s1 = v[1].sF; results[15].s2 = v[2].sF; results[15].s3 = v[3].sF; results[15].s4 = v[4].sF; results[15].s5 = v[5].sF; results[15].s6 = v[6].sF; results[15].s7 = v[7].sF; results[15].s8 = v[8].sF; results[15].s9 = v[9].sF; results[15].sA = v[10].sF; results[15].sB = v[11].sF; results[15].sC = v[12].sF; results[15].sD = v[13].sF; results[15].sE = v[14].sF; results[15].sF = v[15].sF; #endif // Multiplies by alpha and then stores the results into the destination matrix diff --git a/src/kernels/level3/transpose_pad.opencl b/src/kernels/level3/transpose_pad.opencl index 49c5b9a3..ba9a6a56 100644 --- a/src/kernels/level3/transpose_pad.opencl +++ b/src/kernels/level3/transpose_pad.opencl @@ -24,7 +24,7 @@ R"( // Transposes a matrix from source to destination. The output is padded with zero values in case the // destination matrix dimensions are larger than the transposed source matrix dimensions. -INLINE_FUNC void _TransposePadMatrix(__local real* tile, +INLINE_FUNC void _TransposePadMatrix(LOCAL_PTR real* tile, const int src_one, const int src_two, const int src_ld, const int src_offset, __global const real* restrict src, @@ -105,7 +105,7 @@ void TransposePadMatrix(const int src_one, const int src_two, // Transposes a matrix, while considering possible padding in the source matrix. Data is read from a // padded source matrix, but only the actual data is written back to the transposed destination // matrix. This kernel optionally checks for upper/lower triangular matrices. -INLINE_FUNC void _TransposeMatrix(__local real* tile, +INLINE_FUNC void _TransposeMatrix(LOCAL_PTR real* tile, const int src_one, const int src_two, const int src_ld, const int src_offset, __global const real* restrict src, diff --git a/src/kernels/level3/xgemm_direct_part1.opencl b/src/kernels/level3/xgemm_direct_part1.opencl index 8b650589..7d185224 100644 --- a/src/kernels/level3/xgemm_direct_part1.opencl +++ b/src/kernels/level3/xgemm_direct_part1.opencl @@ -184,7 +184,7 @@ INLINE_FUNC void GlobalToPrivateCheckedB(const __global real* restrict bgms, rea // Caches on-chip local memory into per-thread private memory (registers). This function is specific // for caching the A input matrix. 
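For reference, a minimal sketch (not part of the patch) of how the LOCAL_PTR macro introduced in common.opencl is meant to be used by kernel helper functions such as the ones changed below. Under OpenCL it expands to __local; for the CUDA path the one-line addition to opencl_to_cuda.h listed in the diffstat is assumed to supply a different definition, since that hunk is not shown here. INLINE_FUNC, real, __global and restrict come from the surrounding kernel code; CopyRow is a hypothetical helper used only for illustration.

    #ifndef LOCAL_PTR
      #define LOCAL_PTR __local  // OpenCL: pointers into local memory need the address-space qualifier
    #endif

    // A helper operating on a tile in local memory, written once for both back-ends
    INLINE_FUNC void CopyRow(LOCAL_PTR real* tile, const __global real* restrict src, const int n) {
      for (int w = 0; w < n; w += 1) {
        tile[w] = src[w];  // identical body whether LOCAL_PTR expands to '__local' or to something else
      }
    }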
-INLINE_FUNC void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg, +INLINE_FUNC void LocalToPrivateDirectA(LOCAL_PTR real* alm, real apm[MWID], const int kg, const int a_transpose) { #pragma unroll for (int mi=0; mi Date: Sat, 14 Oct 2017 17:21:34 +0200 Subject: Fixed a kernel/attribute order bug in the direct GEMM kernels --- src/kernels/level3/xgemm_direct_batched.opencl | 16 ++++++++-------- src/kernels/level3/xgemm_direct_part3.opencl | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/kernels/level3/xgemm_direct_batched.opencl b/src/kernels/level3/xgemm_direct_batched.opencl index fa582cff..d946a056 100644 --- a/src/kernels/level3/xgemm_direct_batched.opencl +++ b/src/kernels/level3/xgemm_direct_batched.opencl @@ -19,8 +19,8 @@ R"( // ================================================================================================= // Direct version of the batched GEMM kernel with [A, B] = [non-transposed, non-transposed] -__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) -__kernel void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK, +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK, const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas, const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld, const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld, @@ -40,8 +40,8 @@ __kernel void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int } // Direct version of the batched GEMM kernel with [A, B] = [non-transposed, transposed] -__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) -__kernel void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK, +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK, const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas, const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld, const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld, @@ -61,8 +61,8 @@ __kernel void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int } // Direct version of the batched GEMM kernel with [A, B] = [transposed, non-transposed] -__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) -__kernel void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK, +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK, const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas, const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld, const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld, @@ -82,8 +82,8 @@ __kernel void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int } // Direct version of the batched GEMM kernel with [A, B] = [transposed, transposed] -__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) -__kernel void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK, +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK, const __constant real_arg* arg_alphas, const __constant 
real_arg* arg_betas, const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld, const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld, diff --git a/src/kernels/level3/xgemm_direct_part3.opencl b/src/kernels/level3/xgemm_direct_part3.opencl index dcdeb1b6..5862dfa3 100644 --- a/src/kernels/level3/xgemm_direct_part3.opencl +++ b/src/kernels/level3/xgemm_direct_part3.opencl @@ -147,8 +147,8 @@ INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSize // ================================================================================================= // Direct version of the GEMM kernel with [A, B] = [non-transposed, non-transposed] -__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) -__kernel void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK, +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK, const real_arg arg_alpha, const real_arg arg_beta, const __global realMD* restrict agm, const int a_offset, const int a_ld, const __global realND* restrict bgm, const int b_offset, const int b_ld, @@ -162,8 +162,8 @@ __kernel void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK } // Direct version of the GEMM kernel with [A, B] = [non-transposed, transposed] -__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) -__kernel void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK, +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK, const real_arg arg_alpha, const real_arg arg_beta, const __global realMD* restrict agm, const int a_offset, const int a_ld, const __global realND* restrict bgm, const int b_offset, const int b_ld, @@ -177,8 +177,8 @@ __kernel void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK } // Direct version of the GEMM kernel with [A, B] = [transposed, non-transposed] -__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) -__kernel void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK, +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK, const real_arg arg_alpha, const real_arg arg_beta, const __global realMD* restrict agm, const int a_offset, const int a_ld, const __global realND* restrict bgm, const int b_offset, const int b_ld, @@ -192,8 +192,8 @@ __kernel void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK } // Direct version of the GEMM kernel with [A, B] = [transposed, transposed] -__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) -__kernel void XgemmDirectTT(const int kSizeM, const int kSizeN, const int kSizeK, +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XgemmDirectTT(const int kSizeM, const int kSizeN, const int kSizeK, const real_arg arg_alpha, const real_arg arg_beta, const __global realMD* restrict agm, const int a_offset, const int a_ld, const __global realND* restrict bgm, const int b_offset, const int b_ld, -- cgit v1.2.3 From 7408da174c848ffeaa1fe2da52f26a057e65b0f1 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 15 Oct 2017 12:17:35 +0200 Subject: Various fixes to make the first CUDA examples work --- src/cupp11.hpp | 12 ++++++++---- src/utilities/utilities.cpp | 16 ++++++++++------ 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/cupp11.hpp 
b/src/cupp11.hpp index 854c0be9..5b18d4cf 100644 --- a/src/cupp11.hpp +++ b/src/cupp11.hpp @@ -41,6 +41,7 @@ #include // std::string #include // std::vector #include // std::shared_ptr +#include // std::strlen // CUDA #include // CUDA driver API @@ -251,6 +252,7 @@ public: auto result = std::string{}; result.resize(kStringLength); CheckError(cuDeviceGetName(&result[0], result.size(), device_)); + result.resize(strlen(result.c_str())); // Removes any trailing '\0'-characters return result; } std::string Type() const { return "GPU"; } @@ -657,12 +659,13 @@ public: // Constructor based on the regular CUDA data-type: memory management is handled elsewhere explicit Kernel(const CUmodule module, const CUfunction kernel): + name_("unknown"), module_(module), kernel_(kernel) { } // Regular constructor with memory management - explicit Kernel(const Program &program, const std::string &name) { + explicit Kernel(const Program &program, const std::string &name): name_(name) { CheckError(cuModuleLoadDataEx(&module_, program.GetIR().data(), 0, nullptr, nullptr)); CheckError(cuModuleGetFunction(&kernel_, module_, name.c_str())); } @@ -701,7 +704,7 @@ public: // Retrieves the name of the kernel std::string GetFunctionName() const { - return std::string{"unknown"}; // Not implemented for the CUDA backend + return name_; } // Launches a kernel onto the specified queue @@ -722,10 +725,10 @@ public: } // Launches the kernel, its execution time is recorded by events - CheckError(cuEventRecord(event->start(), queue())); + if (event) { CheckError(cuEventRecord(event->start(), queue())); } CheckError(cuLaunchKernel(kernel_, grid[0], grid[1], grid[2], block[0], block[1], block[2], 0, queue(), pointers.data(), nullptr)); - CheckError(cuEventRecord(event->end(), queue())); + if (event) { CheckError(cuEventRecord(event->end(), queue())); } } // As above, but with an event waiting list @@ -748,6 +751,7 @@ public: const CUfunction& operator()() const { return kernel_; } CUfunction operator()() { return kernel_; } private: + const std::string name_; CUmodule module_; CUfunction kernel_; std::vector arguments_indices_; // Indices of the arguments diff --git a/src/utilities/utilities.cpp b/src/utilities/utilities.cpp index a5c1d45e..f2574104 100644 --- a/src/utilities/utilities.cpp +++ b/src/utilities/utilities.cpp @@ -413,13 +413,17 @@ std::string GetDeviceVendor(const Device& device) { // Mid-level info std::string GetDeviceArchitecture(const Device& device) { auto device_architecture = std::string{""}; - if (device.HasExtension(kKhronosAttributesNVIDIA)) { + #ifdef CUDA_API device_architecture = device.NVIDIAComputeCapability(); - } - else if (device.HasExtension(kKhronosAttributesAMD)) { - device_architecture = device.Name(); // Name is architecture for AMD APP and AMD ROCm - } - // Note: no else - 'device_architecture' might be the empty string + #else + if (device.HasExtension(kKhronosAttributesNVIDIA)) { + device_architecture = device.NVIDIAComputeCapability(); + } + else if (device.HasExtension(kKhronosAttributesAMD)) { + device_architecture = device.Name(); // Name is architecture for AMD APP and AMD ROCm + } + // Note: no else - 'device_architecture' might be the empty string + #endif for (auto &find_and_replace : device_mapping::kArchitectureNames) { // replacing to common names if (device_architecture == find_and_replace.first) { device_architecture = find_and_replace.second; } -- cgit v1.2.3 From a3069a97c3e5c22635786870c8a9d02ca16d3d1d Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 15 Oct 2017 
13:56:19 +0200 Subject: Prepared test and client infrastructure for use with the CUDA API --- CMakeLists.txt | 6 +++++- test/correctness/testblas.cpp | 44 ++++++++++++++---------------------------- test/correctness/tester.hpp | 2 +- test/performance/client.hpp | 2 +- test/routines/level1/xaxpy.hpp | 21 +++++++++++++------- test/test_utilities.cpp | 44 ++++++++++++++++++++++-------------------- test/test_utilities.hpp | 21 ++++++++++++++++++-- 7 files changed, 78 insertions(+), 62 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d4e47215..e2f43f8e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -165,8 +165,10 @@ endif() # Locates the reference BLAS libraries in case the tests need to be compiled. The "FindclBLAS.cmake", # "FindCBLAS.cmake" and "FindcuBLAS.cmake" are included. if(CLIENTS OR TESTS) - find_package(clBLAS) find_package(CBLAS) + if(OPENCL) + find_package(clBLAS) + endif() if(CUBLAS) find_package(cuBLAS) endif() @@ -195,6 +197,8 @@ set(LEVEL3_ROUTINES xgemm xsymm xhemm xsyrk xherk xsyr2k xher2k xtrmm xtrsm) set(LEVELX_ROUTINES xomatcopy xim2col xaxpybatched xgemmbatched) set(ROUTINES ${LEVEL1_ROUTINES} ${LEVEL2_ROUTINES} ${LEVEL3_ROUTINES} ${LEVELX_ROUTINES}) set(PRECISIONS 32 64 3232 6464 16) + +# Sample programs if(OPENCL) set(SAMPLE_PROGRAMS_CPP sgemm sgemm_batched) set(SAMPLE_PROGRAMS_C sasum dgemv sgemm haxpy cache) diff --git a/test/correctness/testblas.cpp b/test/correctness/testblas.cpp index 659131c5..aa4b4785 100644 --- a/test/correctness/testblas.cpp +++ b/test/correctness/testblas.cpp @@ -241,36 +241,22 @@ void TestBlas::TestInvalid(std::vector> &test_vector, const st std::cout << std::flush; } - // Creates the OpenCL buffers. Note: we are not using the C++ version since we explicitly + // Creates the buffers. Note: we are not using the cxpp11.h C++ version since we explicitly // want to be able to create invalid buffers (no error checking here). 
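To make the comment above concrete, here is a minimal sketch (assuming the OpenCL back-end only; names follow the surrounding test code, and the helper name is hypothetical) of allocating a buffer with no validity checking, which the CreateInvalidBuffer helper introduced further down in test_utilities.hpp generalises for both back-ends:

    // Sketch only: allocate a raw cl_mem without checking for errors, then wrap it in the
    // project's Buffer<T> type so the invalid-argument tests can hand it to the routines.
    template <typename T>
    Buffer<T> MakeUncheckedBuffer(const Context& context, const size_t size) {
      auto raw = clCreateBuffer(context(), CL_MEM_READ_WRITE, size * sizeof(T), nullptr, nullptr);
      return Buffer<T>(raw);  // deliberately no CL_SUCCESS check: a failed allocation is acceptable here
    }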
- auto x1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.x_size*sizeof(T), nullptr,nullptr); - auto y1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.y_size*sizeof(T), nullptr,nullptr); - auto a1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.a_size*sizeof(T), nullptr,nullptr); - auto b1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.b_size*sizeof(T), nullptr,nullptr); - auto c1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.c_size*sizeof(T), nullptr,nullptr); - auto ap1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.ap_size*sizeof(T), nullptr,nullptr); - auto d1 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.scalar_size*sizeof(T), nullptr,nullptr); - auto x_vec1 = Buffer(x1); - auto y_vec1 = Buffer(y1); - auto a_mat1 = Buffer(a1); - auto b_mat1 = Buffer(b1); - auto c_mat1 = Buffer(c1); - auto ap_mat1 = Buffer(ap1); - auto scalar1 = Buffer(d1); - auto x2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.x_size*sizeof(T), nullptr,nullptr); - auto y2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.y_size*sizeof(T), nullptr,nullptr); - auto a2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.a_size*sizeof(T), nullptr,nullptr); - auto b2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.b_size*sizeof(T), nullptr,nullptr); - auto c2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.c_size*sizeof(T), nullptr,nullptr); - auto ap2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.ap_size*sizeof(T), nullptr,nullptr); - auto d2 = clCreateBuffer(context_(), CL_MEM_READ_WRITE, args.scalar_size*sizeof(T), nullptr,nullptr); - auto x_vec2 = Buffer(x2); - auto y_vec2 = Buffer(y2); - auto a_mat2 = Buffer(a2); - auto b_mat2 = Buffer(b2); - auto c_mat2 = Buffer(c2); - auto ap_mat2 = Buffer(ap2); - auto scalar2 = Buffer(d2); + auto x_vec1 = CreateInvalidBuffer(context_, args.x_size); + auto y_vec1 = CreateInvalidBuffer(context_, args.y_size); + auto a_mat1 = CreateInvalidBuffer(context_, args.a_size); + auto b_mat1 = CreateInvalidBuffer(context_, args.b_size); + auto c_mat1 = CreateInvalidBuffer(context_, args.c_size); + auto ap_mat1 = CreateInvalidBuffer(context_, args.ap_size); + auto scalar1 = CreateInvalidBuffer(context_, args.scalar_size); + auto x_vec2 = CreateInvalidBuffer(context_, args.x_size); + auto y_vec2 = CreateInvalidBuffer(context_, args.y_size); + auto a_mat2 = CreateInvalidBuffer(context_, args.a_size); + auto b_mat2 = CreateInvalidBuffer(context_, args.b_size); + auto c_mat2 = CreateInvalidBuffer(context_, args.c_size); + auto ap_mat2 = CreateInvalidBuffer(context_, args.ap_size); + auto scalar2 = CreateInvalidBuffer(context_, args.scalar_size); auto buffers1 = Buffers{x_vec1, y_vec1, a_mat1, b_mat1, c_mat1, ap_mat1, scalar1}; auto buffers2 = Buffers{x_vec2, y_vec2, a_mat2, b_mat2, c_mat2, ap_mat2, scalar2}; diff --git a/test/correctness/tester.hpp b/test/correctness/tester.hpp index caf03787..640f870a 100644 --- a/test/correctness/tester.hpp +++ b/test/correctness/tester.hpp @@ -22,13 +22,13 @@ #include #include +#include "utilities/utilities.hpp" #include "test/test_utilities.hpp" // The libraries #ifdef CLBLAST_REF_CLBLAS #include #endif -#include "clblast.h" namespace clblast { // ================================================================================================= diff --git a/test/performance/client.hpp b/test/performance/client.hpp index 2ba09cb9..0b6176c8 100644 --- a/test/performance/client.hpp +++ b/test/performance/client.hpp @@ -32,7 +32,7 @@ #include #endif #include "test/wrapper_cuda.hpp" -#include 
"clblast.h" +#include "utilities/utilities.hpp" namespace clblast { // ================================================================================================= diff --git a/test/routines/level1/xaxpy.hpp b/test/routines/level1/xaxpy.hpp index 17cae6ad..cdceb4c7 100644 --- a/test/routines/level1/xaxpy.hpp +++ b/test/routines/level1/xaxpy.hpp @@ -70,13 +70,20 @@ class TestXaxpy { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Axpy(args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Axpy(args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Axpy(args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + #endif return status; } diff --git a/test/test_utilities.cpp b/test/test_utilities.cpp index 579eb61c..84f8894f 100644 --- a/test/test_utilities.cpp +++ b/test/test_utilities.cpp @@ -88,27 +88,29 @@ void FloatToHalfBuffer(std::vector& result, const std::vector& sour } // As above, but now for OpenCL data-types instead of std::vectors -Buffer HalfToFloatBuffer(const Buffer& source, RawCommandQueue queue_raw) { - const auto size = source.GetSize() / sizeof(half); - auto queue = Queue(queue_raw); - auto context = queue.GetContext(); - auto source_cpu = std::vector(size); - source.Read(queue, size, source_cpu); - auto result_cpu = HalfToFloatBuffer(source_cpu); - auto result = Buffer(context, size); - result.Write(queue, size, result_cpu); - return result; -} -void FloatToHalfBuffer(Buffer& result, const Buffer& source, RawCommandQueue queue_raw) { - const auto size = source.GetSize() / sizeof(float); - auto queue = Queue(queue_raw); - auto context = queue.GetContext(); - auto source_cpu = std::vector(size); - source.Read(queue, size, source_cpu); - auto result_cpu = std::vector(size); - FloatToHalfBuffer(result_cpu, source_cpu); - result.Write(queue, size, result_cpu); -} +#ifdef OPENCL_API + Buffer HalfToFloatBuffer(const Buffer& source, RawCommandQueue queue_raw) { + const auto size = source.GetSize() / sizeof(half); + auto queue = Queue(queue_raw); + auto context = queue.GetContext(); + auto source_cpu = std::vector(size); + source.Read(queue, size, source_cpu); + auto result_cpu = HalfToFloatBuffer(source_cpu); + auto result = Buffer(context, size); + result.Write(queue, size, result_cpu); + return result; + } + void FloatToHalfBuffer(Buffer& result, const Buffer& source, RawCommandQueue queue_raw) { + const auto size = source.GetSize() / sizeof(float); + auto queue = Queue(queue_raw); + auto context = queue.GetContext(); + auto source_cpu = std::vector(size); + source.Read(queue, size, source_cpu); + auto result_cpu = std::vector(size); + FloatToHalfBuffer(result_cpu, source_cpu); + result.Write(queue, size, result_cpu); + } +#endif // ================================================================================================= } // namespace clblast diff 
--git a/test/test_utilities.hpp b/test/test_utilities.hpp index fe7a9cd2..d03c55fc 100644 --- a/test/test_utilities.hpp +++ b/test/test_utilities.hpp @@ -89,8 +89,25 @@ std::vector HalfToFloatBuffer(const std::vector& source); void FloatToHalfBuffer(std::vector& result, const std::vector& source); // As above, but now for OpenCL data-types instead of std::vectors -Buffer HalfToFloatBuffer(const Buffer& source, RawCommandQueue queue_raw); -void FloatToHalfBuffer(Buffer& result, const Buffer& source, RawCommandQueue queue_raw); +#ifdef OPENCL_API + Buffer HalfToFloatBuffer(const Buffer& source, RawCommandQueue queue_raw); + void FloatToHalfBuffer(Buffer& result, const Buffer& source, RawCommandQueue queue_raw); +#endif + +// ================================================================================================= + +// Creates a buffer but don't test for validity. That's the reason this is not using the clpp11.h or +// cupp11.h interface. +template +Buffer CreateInvalidBuffer(const Context& context, const size_t size) { + #ifdef OPENCL_API + auto raw_buffer = clCreateBuffer(context(), CL_MEM_READ_WRITE, size * sizeof(T), nullptr, nullptr); + #elif CUDA_API + CUdeviceptr raw_buffer; + cuMemAlloc(&raw_buffer, size * sizeof(T)); + #endif + return Buffer(raw_buffer); +} // ================================================================================================= } // namespace clblast -- cgit v1.2.3 From 71049e8d3966ac58263355a41abb4eac5dec818f Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 15 Oct 2017 17:41:44 +0200 Subject: Added the SM-compute-arch version to the nv compile options --- src/cupp11.hpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/cupp11.hpp b/src/cupp11.hpp index 5b18d4cf..2a54ef95 100644 --- a/src/cupp11.hpp +++ b/src/cupp11.hpp @@ -272,6 +272,11 @@ public: const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR); return "SM"+std::to_string(major)+"."+std::to_string(minor); } + std::string ComputeArch() const { + const auto major = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR); + const auto minor = GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR); + return "compute_"+std::to_string(major)+std::to_string(minor); + } bool HasExtension(const std::string &extension) const { return false; } bool SupportsFP64() const { return true; } bool SupportsFP16() const { @@ -396,7 +401,8 @@ public: } // Compiles the device program and checks whether or not there are any warnings/errors - void Build(const Device &, std::vector &options) { + void Build(const Device &device, std::vector &options) { + options.push_back("-arch=" + device.ComputeArch()); if (from_binary_) { return; } auto raw_options = std::vector(); for (const auto &option: options) { -- cgit v1.2.3 From 7663cba23487290d7bf62c268410c840e3ee7972 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 15 Oct 2017 17:43:20 +0200 Subject: Fixes for the CUDA API: first tests pass and the client runs --- src/kernels/opencl_to_cuda.h | 4 ++-- test/routines/level1/xaxpy.hpp | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/kernels/opencl_to_cuda.h b/src/kernels/opencl_to_cuda.h index fac30dfc..7602b539 100644 --- a/src/kernels/opencl_to_cuda.h +++ b/src/kernels/opencl_to_cuda.h @@ -32,8 +32,8 @@ __device__ int get_group_id(const int x) { return blockIdx.z; } __device__ int get_global_size(const int x) { - if (x == 0) { return gridDim.x; } - if (x == 1) { return gridDim.y; } + if (x == 0) { return gridDim.x * blockDim.x; } + if (x 
== 1) { return gridDim.y * blockDim.y; } return gridDim.z; } __device__ int get_global_id(const int x) { diff --git a/test/routines/level1/xaxpy.hpp b/test/routines/level1/xaxpy.hpp index cdceb4c7..7491a9e8 100644 --- a/test/routines/level1/xaxpy.hpp +++ b/test/routines/level1/xaxpy.hpp @@ -83,6 +83,7 @@ class TestXaxpy { buffers.x_vec(), args.x_offset, args.x_inc, buffers.y_vec(), args.y_offset, args.y_inc, queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); #endif return status; } -- cgit v1.2.3 From e6da575fff9d55de2b83def06243ca8dc9038f40 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 15 Oct 2017 19:35:21 +0200 Subject: Modified test interfaces such that they support either OpenCL or CUDA --- test/routines/level1/xamax.hpp | 22 +++++++++++++++------- test/routines/level1/xasum.hpp | 18 +++++++++++++----- test/routines/level1/xcopy.hpp | 22 +++++++++++++++------- test/routines/level1/xdot.hpp | 25 +++++++++++++++++-------- test/routines/level1/xdotc.hpp | 25 +++++++++++++++++-------- test/routines/level1/xdotu.hpp | 25 +++++++++++++++++-------- test/routines/level1/xnrm2.hpp | 22 +++++++++++++++------- test/routines/level1/xscal.hpp | 19 +++++++++++++------ test/routines/level1/xswap.hpp | 22 +++++++++++++++------- test/routines/level2/xgbmv.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xgemv.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xger.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xgerc.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xgeru.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xhbmv.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xhemv.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xher.hpp | 25 +++++++++++++++++-------- test/routines/level2/xher2.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xhpmv.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xhpr.hpp | 25 +++++++++++++++++-------- test/routines/level2/xhpr2.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xsbmv.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xspmv.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xspr.hpp | 25 +++++++++++++++++-------- test/routines/level2/xspr2.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xsymv.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xsyr.hpp | 25 +++++++++++++++++-------- test/routines/level2/xsyr2.hpp | 28 +++++++++++++++++++--------- test/routines/level2/xtbmv.hpp | 26 ++++++++++++++++++-------- test/routines/level2/xtpmv.hpp | 25 +++++++++++++++++-------- test/routines/level2/xtrmv.hpp | 25 +++++++++++++++++-------- test/routines/level2/xtrsv.hpp | 25 +++++++++++++++++-------- test/routines/level3/xgemm.hpp | 28 +++++++++++++++++++--------- test/routines/level3/xhemm.hpp | 28 +++++++++++++++++++--------- test/routines/level3/xher2k.hpp | 28 +++++++++++++++++++--------- test/routines/level3/xherk.hpp | 25 +++++++++++++++++-------- test/routines/level3/xsymm.hpp | 28 +++++++++++++++++++--------- test/routines/level3/xsyr2k.hpp | 28 +++++++++++++++++++--------- test/routines/level3/xsyrk.hpp | 25 +++++++++++++++++-------- test/routines/level3/xtrmm.hpp | 25 +++++++++++++++++-------- test/routines/level3/xtrsm.hpp | 25 +++++++++++++++++-------- test/routines/levelx/xaxpybatched.hpp | 25 +++++++++++++++++-------- test/routines/levelx/xgemmbatched.hpp | 31 +++++++++++++++++++++---------- test/routines/levelx/xim2col.hpp | 34 +++++++++++++++++++++++----------- 
test/routines/levelx/xinvert.hpp | 25 +++++++++++++++++-------- test/routines/levelx/xomatcopy.hpp | 25 +++++++++++++++++-------- 46 files changed, 817 insertions(+), 384 deletions(-) diff --git a/test/routines/level1/xamax.hpp b/test/routines/level1/xamax.hpp index 868a79ed..d74807c9 100644 --- a/test/routines/level1/xamax.hpp +++ b/test/routines/level1/xamax.hpp @@ -69,13 +69,21 @@ class TestXamax { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Amax(args.n, - buffers.scalar(), args.imax_offset, - buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Amax(args.n, + buffers.scalar(), args.imax_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Amax(args.n, + buffers.scalar(), args.imax_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level1/xasum.hpp b/test/routines/level1/xasum.hpp index 6add9c64..573f1223 100644 --- a/test/routines/level1/xasum.hpp +++ b/test/routines/level1/xasum.hpp @@ -69,13 +69,21 @@ class TestXasum { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { + #ifdef OPENCL_API auto queue_plain = queue(); auto event = cl_event{}; - auto status = Asum(args.n, - buffers.scalar(), args.asum_offset, - buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + auto status = Asum(args.n, + buffers.scalar(), args.asum_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Asum(args.n, + buffers.scalar(), args.asum_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level1/xcopy.hpp b/test/routines/level1/xcopy.hpp index 7a5c99b8..58abdbf4 100644 --- a/test/routines/level1/xcopy.hpp +++ b/test/routines/level1/xcopy.hpp @@ -69,13 +69,21 @@ class TestXcopy { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Copy(args.n, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Copy(args.n, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Copy(args.n, + buffers.x_vec(), args.x_offset, args.x_inc, + 
buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level1/xdot.hpp b/test/routines/level1/xdot.hpp index 1ea25994..229d18c9 100644 --- a/test/routines/level1/xdot.hpp +++ b/test/routines/level1/xdot.hpp @@ -73,14 +73,23 @@ class TestXdot { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Dot(args.n, - buffers.scalar(), args.dot_offset, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Dot(args.n, + buffers.scalar(), args.dot_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Dot(args.n, + buffers.scalar(), args.dot_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level1/xdotc.hpp b/test/routines/level1/xdotc.hpp index c800c1f5..9a1dc33a 100644 --- a/test/routines/level1/xdotc.hpp +++ b/test/routines/level1/xdotc.hpp @@ -73,14 +73,23 @@ class TestXdotc { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Dotc(args.n, - buffers.scalar(), args.dot_offset, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Dotc(args.n, + buffers.scalar(), args.dot_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Dotc(args.n, + buffers.scalar(), args.dot_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level1/xdotu.hpp b/test/routines/level1/xdotu.hpp index 3545a3a6..4b2c7647 100644 --- a/test/routines/level1/xdotu.hpp +++ b/test/routines/level1/xdotu.hpp @@ -73,14 +73,23 @@ class TestXdotu { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Dotu(args.n, - buffers.scalar(), args.dot_offset, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = 
Dotu(args.n, + buffers.scalar(), args.dot_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Dotu(args.n, + buffers.scalar(), args.dot_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level1/xnrm2.hpp b/test/routines/level1/xnrm2.hpp index 1db70537..f3a789b5 100644 --- a/test/routines/level1/xnrm2.hpp +++ b/test/routines/level1/xnrm2.hpp @@ -69,13 +69,21 @@ class TestXnrm2 { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Nrm2(args.n, - buffers.scalar(), args.nrm2_offset, - buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Nrm2(args.n, + buffers.scalar(), args.nrm2_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Nrm2(args.n, + buffers.scalar(), args.nrm2_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level1/xscal.hpp b/test/routines/level1/xscal.hpp index efa0988d..95038032 100644 --- a/test/routines/level1/xscal.hpp +++ b/test/routines/level1/xscal.hpp @@ -66,12 +66,19 @@ class TestXscal { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Scal(args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Scal(args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Scal(args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level1/xswap.hpp b/test/routines/level1/xswap.hpp index d778cc23..58310698 100644 --- a/test/routines/level1/xswap.hpp +++ b/test/routines/level1/xswap.hpp @@ -69,13 +69,21 @@ class TestXswap { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Swap(args.n, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto 
status = Swap(args.n, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Swap(args.n, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xgbmv.hpp b/test/routines/level2/xgbmv.hpp index 23138c77..7c198e5d 100644 --- a/test/routines/level2/xgbmv.hpp +++ b/test/routines/level2/xgbmv.hpp @@ -81,15 +81,25 @@ class TestXgbmv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Gbmv(args.layout, args.a_transpose, - args.m, args.n, args.kl, args.ku, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, args.beta, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Gbmv(args.layout, args.a_transpose, + args.m, args.n, args.kl, args.ku, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Gbmv(args.layout, args.a_transpose, + args.m, args.n, args.kl, args.ku, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xgemv.hpp b/test/routines/level2/xgemv.hpp index 0ee53b80..780e2976 100644 --- a/test/routines/level2/xgemv.hpp +++ b/test/routines/level2/xgemv.hpp @@ -81,15 +81,25 @@ class TestXgemv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Gemv(args.layout, args.a_transpose, - args.m, args.n, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, args.beta, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Gemv(args.layout, args.a_transpose, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Gemv(args.layout, args.a_transpose, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + 
cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xger.hpp b/test/routines/level2/xger.hpp index 92a1a2ae..9c5e2e40 100644 --- a/test/routines/level2/xger.hpp +++ b/test/routines/level2/xger.hpp @@ -77,15 +77,25 @@ class TestXger { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Ger(args.layout, - args.m, args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - buffers.a_mat(), args.a_offset, args.a_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Ger(args.layout, + args.m, args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Ger(args.layout, + args.m, args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xgerc.hpp b/test/routines/level2/xgerc.hpp index 5d899398..5f58b65d 100644 --- a/test/routines/level2/xgerc.hpp +++ b/test/routines/level2/xgerc.hpp @@ -77,15 +77,25 @@ class TestXgerc { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Gerc(args.layout, - args.m, args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - buffers.a_mat(), args.a_offset, args.a_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Gerc(args.layout, + args.m, args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Gerc(args.layout, + args.m, args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xgeru.hpp b/test/routines/level2/xgeru.hpp index 96dab22e..fea3932c 100644 --- a/test/routines/level2/xgeru.hpp +++ b/test/routines/level2/xgeru.hpp @@ -77,15 +77,25 @@ class TestXgeru { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Geru(args.layout, - args.m, args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - buffers.a_mat(), args.a_offset, args.a_ld, - 
&queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Geru(args.layout, + args.m, args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Geru(args.layout, + args.m, args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xhbmv.hpp b/test/routines/level2/xhbmv.hpp index b6844744..0ccd69b7 100644 --- a/test/routines/level2/xhbmv.hpp +++ b/test/routines/level2/xhbmv.hpp @@ -75,15 +75,25 @@ class TestXhbmv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Hbmv(args.layout, args.triangle, - args.n, args.kl, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, args.beta, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Hbmv(args.layout, args.triangle, + args.n, args.kl, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Hbmv(args.layout, args.triangle, + args.n, args.kl, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xhemv.hpp b/test/routines/level2/xhemv.hpp index e1f23592..053bc2dc 100644 --- a/test/routines/level2/xhemv.hpp +++ b/test/routines/level2/xhemv.hpp @@ -75,15 +75,25 @@ class TestXhemv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Hemv(args.layout, args.triangle, - args.n, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, args.beta, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Hemv(args.layout, args.triangle, + args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = 
Hemv(args.layout, args.triangle, + args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xher.hpp b/test/routines/level2/xher.hpp index 1ac1247b..745df43f 100644 --- a/test/routines/level2/xher.hpp +++ b/test/routines/level2/xher.hpp @@ -71,14 +71,23 @@ class TestXher { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Her(args.layout, args.triangle, - args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.a_mat(), args.a_offset, args.a_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Her(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Her(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xher2.hpp b/test/routines/level2/xher2.hpp index 18ccc1ac..794e9a1e 100644 --- a/test/routines/level2/xher2.hpp +++ b/test/routines/level2/xher2.hpp @@ -75,15 +75,25 @@ class TestXher2 { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Her2(args.layout, args.triangle, - args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - buffers.a_mat(), args.a_offset, args.a_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Her2(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Her2(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xhpmv.hpp b/test/routines/level2/xhpmv.hpp index ad91fe15..157272d3 100644 --- a/test/routines/level2/xhpmv.hpp +++ b/test/routines/level2/xhpmv.hpp @@ -75,15 +75,25 @@ class TestXhpmv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Hpmv(args.layout, args.triangle, - 
args.n, args.alpha, - buffers.ap_mat(), args.ap_offset, - buffers.x_vec(), args.x_offset, args.x_inc, args.beta, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Hpmv(args.layout, args.triangle, + args.n, args.alpha, + buffers.ap_mat(), args.ap_offset, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Hpmv(args.layout, args.triangle, + args.n, args.alpha, + buffers.ap_mat(), args.ap_offset, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xhpr.hpp b/test/routines/level2/xhpr.hpp index f9d580cd..a3bc60d1 100644 --- a/test/routines/level2/xhpr.hpp +++ b/test/routines/level2/xhpr.hpp @@ -71,14 +71,23 @@ class TestXhpr { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Hpr(args.layout, args.triangle, - args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.ap_mat(), args.ap_offset, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Hpr(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.ap_mat(), args.ap_offset, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Hpr(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.ap_mat(), args.ap_offset, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xhpr2.hpp b/test/routines/level2/xhpr2.hpp index f946ba5c..1aa6cc54 100644 --- a/test/routines/level2/xhpr2.hpp +++ b/test/routines/level2/xhpr2.hpp @@ -75,15 +75,25 @@ class TestXhpr2 { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Hpr2(args.layout, args.triangle, - args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - buffers.ap_mat(), args.ap_offset, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Hpr2(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.ap_mat(), args.ap_offset, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Hpr2(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, 
args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.ap_mat(), args.ap_offset, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xsbmv.hpp b/test/routines/level2/xsbmv.hpp index 6481d19b..51d6441e 100644 --- a/test/routines/level2/xsbmv.hpp +++ b/test/routines/level2/xsbmv.hpp @@ -75,15 +75,25 @@ class TestXsbmv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Sbmv(args.layout, args.triangle, - args.n, args.kl, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, args.beta, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Sbmv(args.layout, args.triangle, + args.n, args.kl, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Sbmv(args.layout, args.triangle, + args.n, args.kl, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xspmv.hpp b/test/routines/level2/xspmv.hpp index 9815dbee..f3089836 100644 --- a/test/routines/level2/xspmv.hpp +++ b/test/routines/level2/xspmv.hpp @@ -75,15 +75,25 @@ class TestXspmv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Spmv(args.layout, args.triangle, - args.n, args.alpha, - buffers.ap_mat(), args.ap_offset, - buffers.x_vec(), args.x_offset, args.x_inc, args.beta, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Spmv(args.layout, args.triangle, + args.n, args.alpha, + buffers.ap_mat(), args.ap_offset, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Spmv(args.layout, args.triangle, + args.n, args.alpha, + buffers.ap_mat(), args.ap_offset, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xspr.hpp b/test/routines/level2/xspr.hpp index 01a50c38..d76de610 100644 --- a/test/routines/level2/xspr.hpp +++ b/test/routines/level2/xspr.hpp @@ -71,14 +71,23 @@ class TestXspr { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto 
queue_plain = queue(); - auto event = cl_event{}; - auto status = Spr(args.layout, args.triangle, - args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.ap_mat(), args.ap_offset, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Spr(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.ap_mat(), args.ap_offset, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Spr(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.ap_mat(), args.ap_offset, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xspr2.hpp b/test/routines/level2/xspr2.hpp index 55f8a141..5ce82a52 100644 --- a/test/routines/level2/xspr2.hpp +++ b/test/routines/level2/xspr2.hpp @@ -75,15 +75,25 @@ class TestXspr2 { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Spr2(args.layout, args.triangle, - args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - buffers.ap_mat(), args.ap_offset, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Spr2(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.ap_mat(), args.ap_offset, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Spr2(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.ap_mat(), args.ap_offset, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xsymv.hpp b/test/routines/level2/xsymv.hpp index aec0dfb0..2a70756d 100644 --- a/test/routines/level2/xsymv.hpp +++ b/test/routines/level2/xsymv.hpp @@ -75,15 +75,25 @@ class TestXsymv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Symv(args.layout, args.triangle, - args.n, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, args.beta, - buffers.y_vec(), args.y_offset, args.y_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Symv(args.layout, args.triangle, + args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API 
+ auto status = Symv(args.layout, args.triangle, + args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, args.beta, + buffers.y_vec(), args.y_offset, args.y_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xsyr.hpp b/test/routines/level2/xsyr.hpp index 78b686d8..02aad990 100644 --- a/test/routines/level2/xsyr.hpp +++ b/test/routines/level2/xsyr.hpp @@ -71,14 +71,23 @@ class TestXsyr { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Syr(args.layout, args.triangle, - args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.a_mat(), args.a_offset, args.a_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Syr(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Syr(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xsyr2.hpp b/test/routines/level2/xsyr2.hpp index 38aa4f43..492a9d2d 100644 --- a/test/routines/level2/xsyr2.hpp +++ b/test/routines/level2/xsyr2.hpp @@ -75,15 +75,25 @@ class TestXsyr2 { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Syr2(args.layout, args.triangle, - args.n, args.alpha, - buffers.x_vec(), args.x_offset, args.x_inc, - buffers.y_vec(), args.y_offset, args.y_inc, - buffers.a_mat(), args.a_offset, args.a_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Syr2(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Syr2(args.layout, args.triangle, + args.n, args.alpha, + buffers.x_vec(), args.x_offset, args.x_inc, + buffers.y_vec(), args.y_offset, args.y_inc, + buffers.a_mat(), args.a_offset, args.a_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xtbmv.hpp b/test/routines/level2/xtbmv.hpp index 8c7aa381..a80d9e26 100644 --- a/test/routines/level2/xtbmv.hpp +++ b/test/routines/level2/xtbmv.hpp @@ -70,14 +70,24 @@ class TestXtbmv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Tbmv(args.layout, 
args.triangle, args.a_transpose, args.diagonal, - args.n, args.kl, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Tbmv(args.layout, args.triangle, args.a_transpose, args.diagonal, + args.n, args.kl, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Tbmv(args.layout, args.triangle, args.a_transpose, args.diagonal, + args.n, args.kl, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xtpmv.hpp b/test/routines/level2/xtpmv.hpp index 3afab978..02f334a2 100644 --- a/test/routines/level2/xtpmv.hpp +++ b/test/routines/level2/xtpmv.hpp @@ -70,14 +70,23 @@ class TestXtpmv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Tpmv(args.layout, args.triangle, args.a_transpose, args.diagonal, - args.n, - buffers.ap_mat(), args.ap_offset, - buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Tpmv(args.layout, args.triangle, args.a_transpose, args.diagonal, + args.n, + buffers.ap_mat(), args.ap_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Tpmv(args.layout, args.triangle, args.a_transpose, args.diagonal, + args.n, + buffers.ap_mat(), args.ap_offset, + buffers.x_vec(), args.x_offset, args.x_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xtrmv.hpp b/test/routines/level2/xtrmv.hpp index 2b71f151..4f2dd582 100644 --- a/test/routines/level2/xtrmv.hpp +++ b/test/routines/level2/xtrmv.hpp @@ -70,14 +70,23 @@ class TestXtrmv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Trmv(args.layout, args.triangle, args.a_transpose, args.diagonal, - args.n, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Trmv(args.layout, args.triangle, args.a_transpose, args.diagonal, + args.n, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Trmv(args.layout, args.triangle, args.a_transpose, args.diagonal, + 
args.n, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level2/xtrsv.hpp b/test/routines/level2/xtrsv.hpp index 85b50e85..aec8eace 100644 --- a/test/routines/level2/xtrsv.hpp +++ b/test/routines/level2/xtrsv.hpp @@ -85,14 +85,23 @@ class TestXtrsv { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Trsv(args.layout, args.triangle, args.a_transpose, args.diagonal, - args.n, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Trsv(args.layout, args.triangle, args.a_transpose, args.diagonal, + args.n, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Trsv(args.layout, args.triangle, args.a_transpose, args.diagonal, + args.n, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.x_vec(), args.x_offset, args.x_inc, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xgemm.hpp b/test/routines/level3/xgemm.hpp index 1c430c1c..8444c1c3 100644 --- a/test/routines/level3/xgemm.hpp +++ b/test/routines/level3/xgemm.hpp @@ -90,15 +90,25 @@ class TestXgemm { {{"XGEMM_MIN_INDIRECT_SIZE", switch_threshold}}); if (override_status != StatusCode::kSuccess) { return override_status; } } - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Gemm(args.layout, args.a_transpose, args.b_transpose, - args.m, args.n, args.k, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.b_mat(), args.b_offset, args.b_ld, args.beta, - buffers.c_mat(), args.c_offset, args.c_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Gemm(args.layout, args.a_transpose, args.b_transpose, + args.m, args.n, args.k, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Gemm(args.layout, args.a_transpose, args.b_transpose, + args.m, args.n, args.k, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xhemm.hpp b/test/routines/level3/xhemm.hpp index a89617b5..3b70d3f1 100644 --- a/test/routines/level3/xhemm.hpp +++ b/test/routines/level3/xhemm.hpp @@ -83,15 +83,25 @@ class TestXhemm { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto 
queue_plain = queue(); - auto event = cl_event{}; - auto status = Hemm(args.layout, args.side, args.triangle, - args.m, args.n, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.b_mat(), args.b_offset, args.b_ld, args.beta, - buffers.c_mat(), args.c_offset, args.c_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Hemm(args.layout, args.side, args.triangle, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Hemm(args.layout, args.side, args.triangle, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xher2k.hpp b/test/routines/level3/xher2k.hpp index 55e6d894..6c4e12f1 100644 --- a/test/routines/level3/xher2k.hpp +++ b/test/routines/level3/xher2k.hpp @@ -81,16 +81,26 @@ class TestXher2k { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; auto alpha2 = T{args.alpha, args.alpha}; - auto status = Her2k(args.layout, args.triangle, args.a_transpose, - args.n, args.k, alpha2, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.b_mat(), args.b_offset, args.b_ld, args.beta, - buffers.c_mat(), args.c_offset, args.c_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Her2k(args.layout, args.triangle, args.a_transpose, + args.n, args.k, alpha2, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Her2k(args.layout, args.triangle, args.a_transpose, + args.n, args.k, alpha2, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xherk.hpp b/test/routines/level3/xherk.hpp index 3e1e7e02..c1bb7a0b 100644 --- a/test/routines/level3/xherk.hpp +++ b/test/routines/level3/xherk.hpp @@ -74,14 +74,23 @@ class TestXherk { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Herk(args.layout, args.triangle, args.a_transpose, - args.n, args.k, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, args.beta, - buffers.c_mat(), args.c_offset, args.c_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto 
queue_plain = queue(); + auto event = cl_event{}; + auto status = Herk(args.layout, args.triangle, args.a_transpose, + args.n, args.k, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Herk(args.layout, args.triangle, args.a_transpose, + args.n, args.k, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xsymm.hpp b/test/routines/level3/xsymm.hpp index 5d840d40..90cc1888 100644 --- a/test/routines/level3/xsymm.hpp +++ b/test/routines/level3/xsymm.hpp @@ -83,15 +83,25 @@ class TestXsymm { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Symm(args.layout, args.side, args.triangle, - args.m, args.n, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.b_mat(), args.b_offset, args.b_ld, args.beta, - buffers.c_mat(), args.c_offset, args.c_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Symm(args.layout, args.side, args.triangle, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Symm(args.layout, args.side, args.triangle, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xsyr2k.hpp b/test/routines/level3/xsyr2k.hpp index 4a4a2f10..6b29aff7 100644 --- a/test/routines/level3/xsyr2k.hpp +++ b/test/routines/level3/xsyr2k.hpp @@ -81,15 +81,25 @@ class TestXsyr2k { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Syr2k(args.layout, args.triangle, args.a_transpose, - args.n, args.k, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.b_mat(), args.b_offset, args.b_ld, args.beta, - buffers.c_mat(), args.c_offset, args.c_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Syr2k(args.layout, args.triangle, args.a_transpose, + args.n, args.k, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Syr2k(args.layout, args.triangle, args.a_transpose, + args.n, args.k, 
args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xsyrk.hpp b/test/routines/level3/xsyrk.hpp index 90e46727..b7782176 100644 --- a/test/routines/level3/xsyrk.hpp +++ b/test/routines/level3/xsyrk.hpp @@ -74,14 +74,23 @@ class TestXsyrk { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Syrk(args.layout, args.triangle, args.a_transpose, - args.n, args.k, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, args.beta, - buffers.c_mat(), args.c_offset, args.c_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Syrk(args.layout, args.triangle, args.a_transpose, + args.n, args.k, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Syrk(args.layout, args.triangle, args.a_transpose, + args.n, args.k, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, args.beta, + buffers.c_mat(), args.c_offset, args.c_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xtrmm.hpp b/test/routines/level3/xtrmm.hpp index acc00e01..62d0f573 100644 --- a/test/routines/level3/xtrmm.hpp +++ b/test/routines/level3/xtrmm.hpp @@ -74,14 +74,23 @@ class TestXtrmm { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Trmm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal, - args.m, args.n, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.b_mat(), args.b_offset, args.b_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Trmm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Trmm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/level3/xtrsm.hpp b/test/routines/level3/xtrsm.hpp index d63c9d79..9ce1f09c 100644 --- a/test/routines/level3/xtrsm.hpp +++ b/test/routines/level3/xtrsm.hpp @@ -85,14 +85,23 @@ class TestXtrsm { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = 
queue(); - auto event = cl_event{}; - auto status = Trsm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal, - args.m, args.n, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.b_mat(), args.b_offset, args.b_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Trsm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Trsm(args.layout, args.side, args.triangle, args.a_transpose, args.diagonal, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/levelx/xaxpybatched.hpp b/test/routines/levelx/xaxpybatched.hpp index 4a8fc564..e9715f4e 100644 --- a/test/routines/levelx/xaxpybatched.hpp +++ b/test/routines/levelx/xaxpybatched.hpp @@ -83,14 +83,23 @@ class TestXaxpyBatched { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = AxpyBatched(args.n, args.alphas.data(), - buffers.x_vec(), args.x_offsets.data(), args.x_inc, - buffers.y_vec(), args.y_offsets.data(), args.y_inc, - args.batch_count, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = AxpyBatched(args.n, args.alphas.data(), + buffers.x_vec(), args.x_offsets.data(), args.x_inc, + buffers.y_vec(), args.y_offsets.data(), args.y_inc, + args.batch_count, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = AxpyBatched(args.n, args.alphas.data(), + buffers.x_vec(), args.x_offsets.data(), args.x_inc, + buffers.y_vec(), args.y_offsets.data(), args.y_inc, + args.batch_count, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/levelx/xgemmbatched.hpp b/test/routines/levelx/xgemmbatched.hpp index 704d0578..2a8bd9d4 100644 --- a/test/routines/levelx/xgemmbatched.hpp +++ b/test/routines/levelx/xgemmbatched.hpp @@ -108,8 +108,6 @@ class TestXgemmBatched { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; // Relaxed requirement on ld_a and ld_b within the library, this is here to match clBLAS auto a_rotated = (args.layout == Layout::kColMajor && args.a_transpose != Transpose::kNo) || (args.layout == Layout::kRowMajor && args.a_transpose == Transpose::kNo); @@ -119,14 +117,27 @@ class TestXgemmBatched { auto b_one = (!b_rotated) ? 
args.k : args.n; if (args.a_ld < a_one) { return StatusCode::kInvalidLeadDimA; } if (args.b_ld < b_one) { return StatusCode::kInvalidLeadDimB; } - auto status = GemmBatched(args.layout, args.a_transpose, args.b_transpose, - args.m, args.n, args.k, args.alphas.data(), - buffers.a_mat(), args.a_offsets.data(), args.a_ld, - buffers.b_mat(), args.b_offsets.data(), args.b_ld, args.betas.data(), - buffers.c_mat(), args.c_offsets.data(), args.c_ld, - args.batch_count, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = GemmBatched(args.layout, args.a_transpose, args.b_transpose, + args.m, args.n, args.k, args.alphas.data(), + buffers.a_mat(), args.a_offsets.data(), args.a_ld, + buffers.b_mat(), args.b_offsets.data(), args.b_ld, args.betas.data(), + buffers.c_mat(), args.c_offsets.data(), args.c_ld, + args.batch_count, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = GemmBatched(args.layout, args.a_transpose, args.b_transpose, + args.m, args.n, args.k, args.alphas.data(), + buffers.a_mat(), args.a_offsets.data(), args.a_ld, + buffers.b_mat(), args.b_offsets.data(), args.b_ld, args.betas.data(), + buffers.c_mat(), args.c_offsets.data(), args.c_ld, + args.batch_count, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/levelx/xim2col.hpp b/test/routines/levelx/xim2col.hpp index 4124190f..ebffe85e 100644 --- a/test/routines/levelx/xim2col.hpp +++ b/test/routines/levelx/xim2col.hpp @@ -84,17 +84,29 @@ public: // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Im2col(args.channels, args.height, args.width, - args.kernel_h, args.kernel_w, - args.pad_h, args.pad_w, - args.stride_h, args.stride_w, - args.dilation_h, args.dilation_w, - buffers.a_mat(), args.a_offset, - buffers.b_mat(), args.b_offset, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Im2col(args.channels, args.height, args.width, + args.kernel_h, args.kernel_w, + args.pad_h, args.pad_w, + args.stride_h, args.stride_w, + args.dilation_h, args.dilation_w, + buffers.a_mat(), args.a_offset, + buffers.b_mat(), args.b_offset, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Im2col(args.channels, args.height, args.width, + args.kernel_h, args.kernel_w, + args.pad_h, args.pad_w, + args.stride_h, args.stride_w, + args.dilation_h, args.dilation_w, + buffers.a_mat(), args.a_offset, + buffers.b_mat(), args.b_offset, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } diff --git a/test/routines/levelx/xinvert.hpp b/test/routines/levelx/xinvert.hpp index cc02a88b..3df1e2b0 100644 --- a/test/routines/levelx/xinvert.hpp +++ b/test/routines/levelx/xinvert.hpp @@ -164,14 +164,23 @@ class TestXinvert { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { try { - auto event = cl_event{}; - 
auto inverter = Xinvert(queue, &event); - inverter.InvertMatrixDiagonalBlocks(args.layout, args.triangle, args.diagonal, - args.n, args.m, - buffers.a_mat, args.a_offset, args.a_ld, - buffers.b_mat); - clWaitForEvents(1, &event); - clReleaseEvent(event); + #ifdef OPENCL_API + auto event = cl_event{}; + auto inverter = Xinvert(queue, &event); + inverter.InvertMatrixDiagonalBlocks(args.layout, args.triangle, args.diagonal, + args.n, args.m, + buffers.a_mat, args.a_offset, args.a_ld, + buffers.b_mat); + clWaitForEvents(1, &event); + clReleaseEvent(event); + #elif CUDA_API + auto inverter = Xinvert(queue, nullptr); + inverter.InvertMatrixDiagonalBlocks(args.layout, args.triangle, args.diagonal, + args.n, args.m, + buffers.a_mat, args.a_offset, args.a_ld, + buffers.b_mat); + cuStreamSynchronize(queue()); + #endif } catch (...) { return DispatchException(); } return StatusCode::kSuccess; } diff --git a/test/routines/levelx/xomatcopy.hpp b/test/routines/levelx/xomatcopy.hpp index 2736cf75..70bda452 100644 --- a/test/routines/levelx/xomatcopy.hpp +++ b/test/routines/levelx/xomatcopy.hpp @@ -126,14 +126,23 @@ class TestXomatcopy { // Describes how to run the CLBlast routine static StatusCode RunRoutine(const Arguments &args, Buffers &buffers, Queue &queue) { - auto queue_plain = queue(); - auto event = cl_event{}; - auto status = Omatcopy(args.layout, args.a_transpose, - args.m, args.n, args.alpha, - buffers.a_mat(), args.a_offset, args.a_ld, - buffers.b_mat(), args.b_offset, args.b_ld, - &queue_plain, &event); - if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Omatcopy(args.layout, args.a_transpose, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } + #elif CUDA_API + auto status = Omatcopy(args.layout, args.a_transpose, + args.m, args.n, args.alpha, + buffers.a_mat(), args.a_offset, args.a_ld, + buffers.b_mat(), args.b_offset, args.b_ld, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); + #endif return status; } -- cgit v1.2.3 From 8431a165d02f55b4b4bcaa8920da65ad0558f2df Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 15 Oct 2017 19:38:48 +0200 Subject: Fixed a small copy-paste typo --- test/routines/level2/xtbmv.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/test/routines/level2/xtbmv.hpp b/test/routines/level2/xtbmv.hpp index a80d9e26..587676ca 100644 --- a/test/routines/level2/xtbmv.hpp +++ b/test/routines/level2/xtbmv.hpp @@ -84,7 +84,6 @@ class TestXtbmv { args.n, args.kl, buffers.a_mat(), args.a_offset, args.a_ld, buffers.x_vec(), args.x_offset, args.x_inc, - &queue_plain, &event); queue.GetContext()(), queue.GetDevice()()); cuStreamSynchronize(queue()); #endif -- cgit v1.2.3 From d62823f0674df0593836b9b0eb9c0d6e939acf86 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 15 Oct 2017 19:53:52 +0200 Subject: Added a missing OpenCL-to-CUDA function translation --- src/kernels/opencl_to_cuda.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/kernels/opencl_to_cuda.h b/src/kernels/opencl_to_cuda.h index 7602b539..e8206764 100644 --- a/src/kernels/opencl_to_cuda.h +++ b/src/kernels/opencl_to_cuda.h @@ -31,10 +31,15 @@ __device__ int get_group_id(const int x) { if (x == 1) { return blockIdx.y; } return 
blockIdx.z; } +__device__ int get_num_groups(const int x) { + if (x == 0) { return gridDim.x; } + if (x == 1) { return gridDim.y; } + return gridDim.z; +} __device__ int get_global_size(const int x) { if (x == 0) { return gridDim.x * blockDim.x; } if (x == 1) { return gridDim.y * blockDim.y; } - return gridDim.z; + return gridDim.z * blockDim.z; } __device__ int get_global_id(const int x) { if (x == 0) { return blockIdx.x*blockDim.x + threadIdx.x; } -- cgit v1.2.3 From 0719f1448655192d2ce6c17ee51c770ef16dd120 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Mon, 16 Oct 2017 21:52:55 +0200 Subject: Made all CUDA kernel launches synchronous; removed exception raising --- src/cupp11.hpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/cupp11.hpp b/src/cupp11.hpp index 2a54ef95..1c7e6c9c 100644 --- a/src/cupp11.hpp +++ b/src/cupp11.hpp @@ -146,7 +146,7 @@ public: } // Waits for completion of this event (not implemented for CUDA) - void WaitForCompletion() const { } + void WaitForCompletion() const { } // not needed due to cuStreamSynchronize call after each kernel launch // Retrieves the elapsed time of the last recorded event float GetElapsedTime() const { @@ -716,6 +716,10 @@ public: // Launches a kernel onto the specified queue void Launch(const Queue &queue, const std::vector &global, const std::vector &local, EventPointer event) { + // TODO: Currently this CUDA launch is always synchronous due to a cuStreamSynchronize call + if (local.size() == 0) { + throw LogicError("Kernel: launching with a default workgroup size is not implemented for the CUDA back-end"); + } // Creates the grid (number of threadblocks) and sets the block sizes (threads per block) auto grid = std::vector{1, 1, 1}; @@ -734,23 +738,18 @@ public: if (event) { CheckError(cuEventRecord(event->start(), queue())); } CheckError(cuLaunchKernel(kernel_, grid[0], grid[1], grid[2], block[0], block[1], block[2], 0, queue(), pointers.data(), nullptr)); + cuStreamSynchronize(queue()); if (event) { CheckError(cuEventRecord(event->end(), queue())); } } // As above, but with an event waiting list - // TODO: Implement this function void Launch(const Queue &queue, const std::vector &global, const std::vector &local, EventPointer event, const std::vector& waitForEvents) { - if (local.size() == 0) { - throw LogicError("Kernel: launching with a default workgroup size is not implemented for the CUDA back-end"); - } - else if (waitForEvents.size() != 0) { - throw LogicError("Kernel: launching with an event waiting list is not implemented for the CUDA back-end"); - } - else { - return Launch(queue, global, local, event); + for (auto &waitEvent : waitForEvents) { + waitEvent.WaitForCompletion(); // note: doesn't do anything, every kernel call is synchronous } + return Launch(queue, global, local, event); } // Accessors to the private data-members -- cgit v1.2.3 From 03760f80eb7eb07450da379d129ba64d92bfcc41 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Mon, 16 Oct 2017 21:54:23 +0200 Subject: Added CUDA API documentation --- CHANGELOG | 4 ++++ README.md | 14 +++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGELOG b/CHANGELOG index bb2013a6..a2416dd3 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,5 +1,9 @@ Development (next version) +- Added a CUDA API to CLBlast: + * The library and kernels can be compiled with the CUDA driver API and NVRTC (requires CUDA 7.5) + * Two CUDA API sample programs are added: SGEMM and DAXPY + * All correctness tests and performance 
clients work on CUDA like they did for OpenCL
 - Kernels are now cached based on their tuning parameters: fits the use-case of 'OverrideParameters'
 - Improved performance for small GEMM problems by going from 3 to 1 optional temporary buffers
 - Various minor fixes and enhancements
 
diff --git a/README.md b/README.md
index c13770f6..dac47fce 100644
--- a/README.md
+++ b/README.md
@@ -99,11 +99,23 @@ To get started quickly, a couple of stand-alone example programs are included in
     cmake -DSAMPLES=ON ..
 
+For all of CLBlast's APIs, it is possible to optionally set an OS environmental variable `CLBLAST_BUILD_OPTIONS` to pass specific build options to the OpenCL compiler.
+
+
+Using the library (Netlib API)
+-------------
+
 There is also a Netlib CBLAS C API available. This is however not recommended for full control over performance, since at every call it will copy all buffers to and from the OpenCL device. Especially for level 1 and level 2 BLAS functions performance will be impacted severely. However, it can be useful if you don't want to touch OpenCL at all. You can set the default device and platform by setting the `CLBLAST_DEVICE` and `CLBLAST_PLATFORM` environmental variables. This API can be used as follows after providing the `-DNETLIB=ON` flag to CMake:
 
     #include
 
-For all of CLBlast's APIs, it is possible to optionally set an OS environmental variable `CLBLAST_BUILD_OPTIONS` to pass specific build options to the OpenCL compiler.
+
+Using the library (CUDA API)
+-------------
+
+There is also a CUDA API of CLBlast available. Enabling this compiles the whole library for CUDA and thus replaces the OpenCL API. It is based upon the CUDA runtime and NVRTC APIs, requiring NVIDIA CUDA 7.5 or higher. The CUDA version of the library can be used as follows after providing the `-DCUDA=ON -DOPENCL=OFF` flags to CMake:
+
+    #include
 
 
 Using the tuners (optional)
-- 
cgit v1.2.3

From f349731d5498f484995514112551c1b60ca4f6d3 Mon Sep 17 00:00:00 2001
From: Cedric Nugteren
Date: Tue, 17 Oct 2017 19:53:09 +0200
Subject: CUDA kernel compilation fixes

---
 src/kernels/level2/level2.opencl | 2 +-
 src/kernels/opencl_to_cuda.h | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/kernels/level2/level2.opencl b/src/kernels/level2/level2.opencl
index 505231ca..ff46c2a5 100644
--- a/src/kernels/level2/level2.opencl
+++ b/src/kernels/level2/level2.opencl
@@ -34,7 +34,7 @@ R"(
 
 // Returns an element from a vector
 INLINE_FUNC real LoadVector(const int id, const int max,
-                            __global real* gm, const int offset, const int inc,
+                            const __global real* gm, const int offset, const int inc,
                             const int do_conjugate) {
   if (id < max) {
     real result = gm[id*inc + offset];
diff --git a/src/kernels/opencl_to_cuda.h b/src/kernels/opencl_to_cuda.h
index e8206764..5682a456 100644
--- a/src/kernels/opencl_to_cuda.h
+++ b/src/kernels/opencl_to_cuda.h
@@ -31,6 +31,11 @@ __device__ int get_group_id(const int x) {
   if (x == 1) { return blockIdx.y; }
   return blockIdx.z;
 }
+__device__ int get_local_size(const int x) {
+  if (x == 0) { return blockDim.x; }
+  if (x == 1) { return blockDim.y; }
+  return blockDim.z;
+}
 __device__ int get_num_groups(const int x) {
   if (x == 0) { return gridDim.x; }
   if (x == 1) { return gridDim.y; }
   return gridDim.z;
 }
-- 
cgit v1.2.3

From b1270f04b89c3271aca11594501f7e997848e394 Mon Sep 17 00:00:00 2001
From: Cedric Nugteren
Date: Tue, 17 Oct 2017 19:56:47 +0200
Subject: Made buffers of batched routines read/write (was: read-only)

---
 src/clpp11.hpp | 3 +++
 src/routines/levelx/xaxpybatched.cpp | 6 +++---
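
The README addition above only shows the include line for the new CUDA API. As a rough, hedged illustration of what a call through that API might look like, the sketch below performs a DAXPY, using the CUDA driver API for set-up. The header name (clblast_cuda.h), the routine name clblast::Axpy and the trailing (context, device) arguments are assumptions, modeled on the queue.GetContext()(), queue.GetDevice()() call pattern used in the test routines earlier in this series; the exact signatures may differ.

    // Hedged sketch only: not taken from the CLBlast sources in this series.
    // Assumed: header name <clblast_cuda.h>, a templated clblast::Axpy taking raw
    // CUdeviceptr buffers plus a CUcontext and CUdevice as its final arguments.
    #include <cstdio>
    #include <vector>

    #include <cuda.h>          // CUDA driver API
    #include <clblast_cuda.h>  // assumed header name for the CUDA version of the API

    int main() {
      const size_t n = 8192;
      const double alpha = 2.0;

      // Plain CUDA driver API set-up: one device, one context
      cuInit(0);
      CUdevice device;
      cuDeviceGet(&device, 0);
      CUcontext context;
      cuCtxCreate(&context, 0, device);

      // Host data
      std::vector<double> host_x(n, 1.0);
      std::vector<double> host_y(n, 3.0);

      // Device buffers are raw CUdeviceptr values rather than cl_mem objects
      CUdeviceptr dev_x, dev_y;
      cuMemAlloc(&dev_x, n * sizeof(double));
      cuMemAlloc(&dev_y, n * sizeof(double));
      cuMemcpyHtoD(dev_x, host_x.data(), n * sizeof(double));
      cuMemcpyHtoD(dev_y, host_y.data(), n * sizeof(double));

      // y = alpha * x + y; the (context, device) pair replaces the OpenCL queue/event pair
      const auto status = clblast::Axpy(n, alpha,
                                        dev_x, 0, 1,
                                        dev_y, 0, 1,
                                        context, device);

      cuMemcpyDtoH(host_y.data(), dev_y, n * sizeof(double));
      std::printf("Axpy status: %d, y[0] = %.1f\n", static_cast<int>(status), host_y[0]);

      cuMemFree(dev_x);
      cuMemFree(dev_y);
      cuCtxDestroy(context);
      return 0;
    }

Error checking of the cu* calls is omitted for brevity; note that, per the cupp11.hpp change above, kernel launches behind such a call are synchronous in this version of the CUDA back-end.
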
src/routines/levelx/xgemmbatched.cpp | 22 +++++++++++----------- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/src/clpp11.hpp b/src/clpp11.hpp index 97045644..2335caef 100644 --- a/src/clpp11.hpp +++ b/src/clpp11.hpp @@ -668,6 +668,9 @@ class Buffer { // Copies from host to device: writing the device buffer a-synchronously void WriteAsync(const Queue &queue, const size_t size, const T* host, const size_t offset = 0) { + if (access_ == BufferAccess::kReadOnly) { + throw LogicError("Buffer: writing to a read-only buffer"); + } if (GetSize() < (offset+size)*sizeof(T)) { throw LogicError("Buffer: target device buffer is too small"); } diff --git a/src/routines/levelx/xaxpybatched.cpp b/src/routines/levelx/xaxpybatched.cpp index 0b755ccf..52c27b78 100644 --- a/src/routines/levelx/xaxpybatched.cpp +++ b/src/routines/levelx/xaxpybatched.cpp @@ -59,9 +59,9 @@ void XaxpyBatched::DoAxpyBatched(const size_t n, const std::vector &alphas x_offsets_int[batch] = static_cast(x_offsets[batch]); y_offsets_int[batch] = static_cast(y_offsets[batch]); } - auto x_offsets_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); - auto y_offsets_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); - auto alphas_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); + auto x_offsets_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); + auto y_offsets_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); + auto alphas_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); x_offsets_device.Write(queue_, batch_count, x_offsets_int); y_offsets_device.Write(queue_, batch_count, y_offsets_int); alphas_device.Write(queue_, batch_count, alphas); diff --git a/src/routines/levelx/xgemmbatched.cpp b/src/routines/levelx/xgemmbatched.cpp index 4e9f0004..8a015e97 100644 --- a/src/routines/levelx/xgemmbatched.cpp +++ b/src/routines/levelx/xgemmbatched.cpp @@ -100,8 +100,8 @@ void XgemmBatched::DoGemmBatched(const Layout layout, const Transpose a_trans } // Upload the scalar arguments to the device - auto alphas_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); - auto betas_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); + auto alphas_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); + auto betas_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); alphas_device.Write(queue_, batch_count, alphas); betas_device.Write(queue_, batch_count, betas); @@ -200,8 +200,8 @@ void XgemmBatched::BatchedGemmIndirect(const size_t m, const size_t n, const // to fill it up until it reaches a certain multiple of size (kernel parameter dependent). In // case nothing has to be done, these kernels can be skipped. 
if (!a_no_temp) { - auto a_offsets_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); - auto a_offsets_i_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); + auto a_offsets_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); + auto a_offsets_i_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); a_offsets_device.Write(queue_, batch_count, a_offsets); a_offsets_i_device.Write(queue_, batch_count, a_offsets_i); auto eventProcessA = Event(); @@ -214,8 +214,8 @@ void XgemmBatched::BatchedGemmIndirect(const size_t m, const size_t n, const // As above, but now for matrix B if (!b_no_temp) { - auto b_offsets_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); - auto b_offsets_i_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); + auto b_offsets_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); + auto b_offsets_i_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); b_offsets_device.Write(queue_, batch_count, b_offsets); b_offsets_i_device.Write(queue_, batch_count, b_offsets_i); auto eventProcessB = Event(); @@ -227,8 +227,8 @@ void XgemmBatched::BatchedGemmIndirect(const size_t m, const size_t n, const } // As above, but now for matrix C - auto c_offsets_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); - auto c_offsets_i_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); + auto c_offsets_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); + auto c_offsets_i_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); if (!c_no_temp) { c_offsets_device.Write(queue_, batch_count, c_offsets); c_offsets_i_device.Write(queue_, batch_count, c_offsets_i); @@ -297,9 +297,9 @@ void XgemmBatched::BatchedGemmDirect(const size_t m, const size_t n, const si const size_t batch_count) { // Uploads the offsets to the device - auto a_offsets_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); - auto b_offsets_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); - auto c_offsets_device = Buffer(context_, BufferAccess::kReadOnly, batch_count); + auto a_offsets_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); + auto b_offsets_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); + auto c_offsets_device = Buffer(context_, BufferAccess::kReadWrite, batch_count); a_offsets_device.Write(queue_, batch_count, a_offsets); b_offsets_device.Write(queue_, batch_count, b_offsets); c_offsets_device.Write(queue_, batch_count, c_offsets); -- cgit v1.2.3 From 9d879c949a04102d536e0e7980b0ce78f5cf1be1 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Tue, 17 Oct 2017 20:29:23 +0200 Subject: Fix an incompatibility with CUDA's FP16 definition --- src/cupp11.hpp | 1 + test/wrapper_cuda.hpp | 1 + 2 files changed, 2 insertions(+) diff --git a/src/cupp11.hpp b/src/cupp11.hpp index 1c7e6c9c..71fdc3cd 100644 --- a/src/cupp11.hpp +++ b/src/cupp11.hpp @@ -44,6 +44,7 @@ #include // std::strlen // CUDA +#define CUDA_NO_HALF // Incompatible with CLBlast's definition; TODO: resolve this #include // CUDA driver API #include // NVIDIA runtime compilation API diff --git a/test/wrapper_cuda.hpp b/test/wrapper_cuda.hpp index c97ae3ef..12417cdd 100644 --- a/test/wrapper_cuda.hpp +++ b/test/wrapper_cuda.hpp @@ -22,6 +22,7 @@ #include "utilities/utilities.hpp" #ifdef CLBLAST_REF_CUBLAS + #define CUDA_NO_HALF #include #include #endif -- cgit v1.2.3 From 363568787ebfcdc0c5e6af9c3c8e71c702e2f951 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren 
Date: Wed, 18 Oct 2017 18:17:30 +0200 Subject: Moved CUmodule code from Kernel to Program class to not require re-compilation every time --- src/cupp11.hpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cupp11.hpp b/src/cupp11.hpp index 71fdc3cd..ec21c5b1 100644 --- a/src/cupp11.hpp +++ b/src/cupp11.hpp @@ -411,6 +411,7 @@ public: } auto status = nvrtcCompileProgram(*program_, raw_options.size(), raw_options.data()); CLCudaAPINVRTCError::Check(status, "nvrtcCompileProgram"); + CheckError(cuModuleLoadDataEx(&module_, GetIR().data(), 0, nullptr, nullptr)); } // Confirms whether a certain status code is an actual compilation error or warning @@ -440,10 +441,12 @@ public: return result; } - // Accessor to the private data-member + // Accessor to the private data-members + const CUmodule GetModule() const { return module_; } const nvrtcProgram& operator()() const { return *program_; } private: std::shared_ptr program_; + CUmodule module_; std::string source_; bool from_binary_; }; @@ -665,16 +668,14 @@ class Kernel { public: // Constructor based on the regular CUDA data-type: memory management is handled elsewhere - explicit Kernel(const CUmodule module, const CUfunction kernel): + explicit Kernel(const CUfunction kernel): name_("unknown"), - module_(module), kernel_(kernel) { } // Regular constructor with memory management explicit Kernel(const Program &program, const std::string &name): name_(name) { - CheckError(cuModuleLoadDataEx(&module_, program.GetIR().data(), 0, nullptr, nullptr)); - CheckError(cuModuleGetFunction(&kernel_, module_, name.c_str())); + CheckError(cuModuleGetFunction(&kernel_, program.GetModule(), name.c_str())); } // Sets a kernel argument at the indicated position. This stores both the value of the argument @@ -758,7 +759,6 @@ public: CUfunction operator()() { return kernel_; } private: const std::string name_; - CUmodule module_; CUfunction kernel_; std::vector arguments_indices_; // Indices of the arguments std::vector arguments_data_; // The arguments data as raw bytes -- cgit v1.2.3
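The get_*() helpers added to src/kernels/opencl_to_cuda.h in the patches above are what let CLBlast's OpenCL kernels compile unmodified under NVRTC. A minimal stand-alone sketch of the same idea follows; the vector_copy kernel is a hypothetical example for illustration and is not part of the CLBlast sources:

    // OpenCL work-item built-ins emulated as CUDA __device__ functions, in the same spirit as
    // src/kernels/opencl_to_cuda.h (get_global_size includes the blockDim.z fix from the first
    // patch above)
    __device__ int get_local_size(const int x) {
      if (x == 0) { return blockDim.x; }
      if (x == 1) { return blockDim.y; }
      return blockDim.z;
    }
    __device__ int get_global_size(const int x) {
      if (x == 0) { return gridDim.x * blockDim.x; }
      if (x == 1) { return gridDim.y * blockDim.y; }
      return gridDim.z * blockDim.z;
    }
    __device__ int get_global_id(const int x) {
      if (x == 0) { return blockIdx.x * blockDim.x + threadIdx.x; }
      if (x == 1) { return blockIdx.y * blockDim.y + threadIdx.y; }
      return blockIdx.z * blockDim.z + threadIdx.z;
    }

    // An OpenCL-style kernel body that now compiles as CUDA thanks to the helpers above
    __global__ void vector_copy(const float* x, float* y, const int n) {
      const int id = get_global_id(0);
      if (id < n) { y[id] = x[id]; }
    }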
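The "Made all CUDA kernel launches synchronous" patch pairs every cuLaunchKernel with a cuStreamSynchronize on the same stream, which is why the event waiting list can safely be reduced to a no-op. A condensed sketch of that launch path under those assumptions; CheckCu and LaunchSynchronous are illustrative names, not CLBlast's own CheckError/exception machinery:

    #include <cuda.h>
    #include <stdexcept>
    #include <vector>

    // Assumption: a small local error check standing in for CLBlast's CheckError helpers
    void CheckCu(const CUresult status) {
      if (status != CUDA_SUCCESS) { throw std::runtime_error("CUDA driver API call failed"); }
    }

    void LaunchSynchronous(CUfunction kernel, CUstream stream,
                           const std::vector<unsigned int>& grid,    // threadblocks per dimension
                           const std::vector<unsigned int>& block,   // threads per threadblock
                           std::vector<void*>& argument_pointers) {  // pointers to each argument value
      CheckCu(cuLaunchKernel(kernel, grid[0], grid[1], grid[2],
                             block[0], block[1], block[2],
                             0,                                      // no dynamic shared memory
                             stream,
                             argument_pointers.data(), nullptr));
      CheckCu(cuStreamSynchronize(stream));                          // makes the launch synchronous
    }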
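The final patch compiles each program once with NVRTC, loads the resulting IR into a CUmodule kept inside the Program object, and leaves each Kernel with only a cuModuleGetFunction lookup, so constructing a kernel no longer reloads the module. A rough sketch of that flow, assuming a current CUDA context and omitting compile-log and error handling; CompileToModule and GetKernel are illustrative names, not CLBlast API:

    #include <cuda.h>
    #include <nvrtc.h>
    #include <string>
    #include <vector>

    CUmodule CompileToModule(const std::string& source) {
      nvrtcProgram program;
      nvrtcCreateProgram(&program, source.c_str(), nullptr, 0, nullptr, nullptr);
      nvrtcCompileProgram(program, 0, nullptr);                        // compile once per program
      size_t ptx_size = 0;
      nvrtcGetPTXSize(program, &ptx_size);
      auto ptx = std::vector<char>(ptx_size);
      nvrtcGetPTX(program, ptx.data());
      nvrtcDestroyProgram(&program);
      CUmodule module;
      cuModuleLoadDataEx(&module, ptx.data(), 0, nullptr, nullptr);    // load once, keep with the program
      return module;
    }

    CUfunction GetKernel(const CUmodule module, const std::string& name) {
      CUfunction kernel;                                               // per-kernel cost is now a lookup
      cuModuleGetFunction(&kernel, module, name.c_str());
      return kernel;
    }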