Diffstat (limited to 'src')
35 files changed, 429 insertions, 353 deletions
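The tuning-database entries added in the diffs below all follow the same hierarchy: device type, then vendor ("QUALCOMM", "ARM", ...), then device name, with a "default" fallback at each level. A minimal sketch of how such a lookup could resolve parameters (structure and names simplified for illustration; this is not the actual CLBlast Database class):

// Hypothetical, simplified model of one database entry: a vendor's table of
// named devices, each mapping to a list of tuning parameters.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct Entry {
  std::string vendor;                                  // e.g. "QUALCOMM"
  std::map<std::string, std::vector<size_t>> devices;  // device name -> parameters
};

// Resolves parameters: exact device match within the vendor first, then the
// vendor's "default" row. Relies on the database-wide "default" entry coming
// last, as it does in the entries below.
std::vector<size_t> Lookup(const std::vector<Entry> &db,
                           const std::string &vendor, const std::string &device) {
  for (const auto &entry : db) {
    if (entry.vendor == vendor || entry.vendor == "default") {
      const auto match = entry.devices.find(device);
      if (match != entry.devices.end()) { return match->second; }
      const auto fallback = entry.devices.find("default");
      if (fallback != entry.devices.end()) { return fallback->second; }
    }
  }
  return {};
}

Under such a scheme, a "QUALCOMM Adreno(TM)" device picks up the new Adreno-tuned rows added in this commit, while unknown devices see only the adjusted "default" values.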
diff --git a/src/database/kernel_selection.hpp b/src/database/kernel_selection.hpp index 44ef0d32..82c7d59d 100644 --- a/src/database/kernel_selection.hpp +++ b/src/database/kernel_selection.hpp @@ -52,6 +52,11 @@ const Database::DatabaseEntry KernelSelectionSingle = { { "default", { 1280*1280*1280 } }, } }, + { + kDeviceTypeGPU, "ARM", { + { "default", { 128*128*128} }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 512*512*512 } }, diff --git a/src/database/kernels/copy.hpp b/src/database/kernels/copy.hpp index 259f95c3..e5defb32 100644 --- a/src/database/kernels/copy.hpp +++ b/src/database/kernels/copy.hpp @@ -30,6 +30,12 @@ const Database::DatabaseEntry CopyHalf = { { "default", { 8, 32, 4, 8 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 32, 8, 8, 1 } }, + { "default", { 32, 8, 8, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 16, 8, 4, 4 } }, @@ -113,6 +119,12 @@ const Database::DatabaseEntry CopySingle = { { "default", { 8, 32, 4, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 32, 8, 8, 1 } }, + { "default", { 32, 8, 8, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 32, 8, 4, 4 } }, @@ -187,6 +199,12 @@ const Database::DatabaseEntry CopyComplexSingle = { { "default", { 32, 8, 1, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 32, 8, 1, 1 } }, + { "default", { 32, 8, 1, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 16, 8, 1, 2 } }, diff --git a/src/database/kernels/pad.hpp b/src/database/kernels/pad.hpp index f925d07d..b6ebde43 100644 --- a/src/database/kernels/pad.hpp +++ b/src/database/kernels/pad.hpp @@ -30,9 +30,15 @@ const Database::DatabaseEntry PadHalf = { { "default", { 8, 8, 2, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 16, 8, 4, 2 } }, + { "default", { 16, 8, 4, 2 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 8, 8, 2, 1 } }, + { "default", { 8, 8, 4, 1 } }, } }, } @@ -113,6 +119,12 @@ const Database::DatabaseEntry PadSingle = { { "default", { 32, 8, 4, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 32, 8, 2, 1 } }, + { "default", { 32, 8, 2, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 32, 8, 2, 1 } }, @@ -195,9 +207,15 @@ const Database::DatabaseEntry PadComplexSingle = { { "default", { 32, 8, 1, 2 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 32, 8, 4, 1 } }, + { "default", { 32, 8, 4, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 32, 8, 1, 2 } }, + { "default", { 32, 8, 1, 1 } }, } }, } diff --git a/src/database/kernels/padtranspose.hpp b/src/database/kernels/padtranspose.hpp index b80a1666..bbda5c65 100644 --- a/src/database/kernels/padtranspose.hpp +++ b/src/database/kernels/padtranspose.hpp @@ -30,6 +30,12 @@ const Database::DatabaseEntry PadtransposeHalf = { { "default", { 0, 8, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 0, 8, 8 } }, + { "default", { 0, 8, 8 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 0, 8, 1 } }, @@ -112,6 +118,12 @@ const Database::DatabaseEntry PadtransposeSingle = { { "default", { 1, 32, 2 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 0, 8, 2 } }, + { "default", { 0, 8, 
2 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 1, 16, 2 } }, @@ -194,9 +206,15 @@ const Database::DatabaseEntry PadtransposeComplexSingle = { { "default", { 1, 16, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 0, 8, 4 } }, + { "default", { 0, 8, 4 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 1, 16, 2 } }, + { "default", { 1, 8, 2 } }, } }, } diff --git a/src/database/kernels/transpose.hpp b/src/database/kernels/transpose.hpp index 446b632c..b00a23dc 100644 --- a/src/database/kernels/transpose.hpp +++ b/src/database/kernels/transpose.hpp @@ -30,9 +30,15 @@ const Database::DatabaseEntry TransposeHalf = { { "default", { 8, 1, 0, 8 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 8, 0, 0, 4 } }, + { "default", { 8, 0, 0, 4 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 16, 0, 1, 4 } }, + { "default", { 8, 0, 1, 8 } }, } }, } @@ -113,6 +119,12 @@ const Database::DatabaseEntry TransposeSingle = { { "default", { 8, 1, 0, 4 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 8, 1, 1, 4 } }, + { "default", { 8, 1, 1, 4 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 8, 0, 1, 4 } }, @@ -189,6 +201,12 @@ const Database::DatabaseEntry TransposeComplexSingle = { { "default", { 16, 1, 0, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 16, 1, 0, 1 } }, + { "default", { 16, 1, 0, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 8, 1, 1, 2 } }, diff --git a/src/database/kernels/xaxpy.hpp b/src/database/kernels/xaxpy.hpp index 58cde9d3..5cb225d1 100644 --- a/src/database/kernels/xaxpy.hpp +++ b/src/database/kernels/xaxpy.hpp @@ -30,9 +30,15 @@ const Database::DatabaseEntry XaxpyHalf = { { "default", { 8, 64, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 8, 64, 1 } }, + { "default", { 8, 64, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 8, 256, 4 } }, + { "default", { 8, 64, 1 } }, } }, } @@ -113,9 +119,15 @@ const Database::DatabaseEntry XaxpySingle = { { "default", { 4, 1024, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 4, 128, 2 } }, + { "default", { 4, 128, 2 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 4, 256, 1 } }, + { "default", { 4, 64, 1 } }, } }, } @@ -195,6 +207,12 @@ const Database::DatabaseEntry XaxpyComplexSingle = { { "default", { 1, 256, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 1, 64, 1 } }, + { "default", { 1, 64, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 1, 128, 1 } }, diff --git a/src/database/kernels/xdot.hpp b/src/database/kernels/xdot.hpp index d234c558..986c32b2 100644 --- a/src/database/kernels/xdot.hpp +++ b/src/database/kernels/xdot.hpp @@ -30,9 +30,15 @@ const Database::DatabaseEntry XdotHalf = { { "default", { 128, 32 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 64, 64 } }, + { "default", { 64, 64 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 128, 32 } }, + { "default", { 128, 64 } }, } }, } @@ -95,6 +101,12 @@ const Database::DatabaseEntry XdotSingle = { { "default", { 256, 64 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 128, 64 
} }, + { "default", { 128, 64 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 128, 32 } }, @@ -159,6 +171,12 @@ const Database::DatabaseEntry XdotComplexSingle = { { "default", { 512, 64 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 64, 256 } }, + { "default", { 64, 256 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 256, 32 } }, diff --git a/src/database/kernels/xgemm.hpp b/src/database/kernels/xgemm.hpp index 2270dd44..43854afb 100644 --- a/src/database/kernels/xgemm.hpp +++ b/src/database/kernels/xgemm.hpp @@ -112,9 +112,15 @@ const Database::DatabaseEntry XgemmSingle = { { "default", { 32, 2, 16, 16, 64, 8, 8, 64, 1, 1, 0, 0, 4, 2 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 32, 2, 8, 8, 32, 8, 8, 32, 1, 1, 0, 0, 4, 1 } }, + { "default", { 32, 2, 8, 8, 32, 8, 8, 32, 1, 1, 0, 0, 4, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 32, 2, 16, 16, 64, 8, 8, 64, 1, 1, 0, 0, 4, 4 } }, + { "default", { 32, 2, 8, 8, 32, 8, 8, 32, 1, 1, 0, 0, 4, 2 } }, } }, } diff --git a/src/database/kernels/xgemm_direct.hpp b/src/database/kernels/xgemm_direct.hpp index 7a1cd983..acace63f 100644 --- a/src/database/kernels/xgemm_direct.hpp +++ b/src/database/kernels/xgemm_direct.hpp @@ -77,9 +77,15 @@ const Database::DatabaseEntry XgemmDirectSingle = { { "default", { 2, 8, 8, 16, 16, 1, 1, 4, 2, 32 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 2, 8, 8, 8, 8, 1, 1, 2, 1, 16 } }, + { "default", { 2, 8, 8, 8, 8, 1, 1, 2, 1, 16 } }, + } + }, { // Default kDeviceTypeAll, "default", { - { "default", { 2, 8, 8, 8, 8, 1, 1, 4, 2, 32 } }, + { "default", { 2, 8, 8, 8, 8, 1, 1, 1, 2, 16 } }, } }, } diff --git a/src/database/kernels/xgemv.hpp b/src/database/kernels/xgemv.hpp index 7adb6f10..c537294a 100644 --- a/src/database/kernels/xgemv.hpp +++ b/src/database/kernels/xgemv.hpp @@ -106,6 +106,12 @@ const Database::DatabaseEntry XgemvSingle = { { "default", { 256, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 64, 1 } }, + { "default", { 64, 1 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 128, 1 } }, diff --git a/src/database/kernels/xgemv_fast.hpp b/src/database/kernels/xgemv_fast.hpp index 8c42aa0e..c3b9103a 100644 --- a/src/database/kernels/xgemv_fast.hpp +++ b/src/database/kernels/xgemv_fast.hpp @@ -106,6 +106,12 @@ const Database::DatabaseEntry XgemvFastSingle = { { "default", { 1, 256, 1 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 1, 64, 4 } }, + { "default", { 1, 64, 4 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 1, 64, 1 } }, diff --git a/src/database/kernels/xgemv_fast_rot.hpp b/src/database/kernels/xgemv_fast_rot.hpp index 644498e2..7e5905e4 100644 --- a/src/database/kernels/xgemv_fast_rot.hpp +++ b/src/database/kernels/xgemv_fast_rot.hpp @@ -82,6 +82,12 @@ const Database::DatabaseEntry XgemvFastRotSingle = { { "default", { 8, 32, 32 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 4, 64, 16 } }, + { "default", { 4, 64, 16 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 8, 32, 32 } }, diff --git a/src/database/kernels/xger.hpp b/src/database/kernels/xger.hpp index d294ab43..e17396f6 100644 --- a/src/database/kernels/xger.hpp +++ b/src/database/kernels/xger.hpp @@ -30,6 +30,12 @@ const 
Database::DatabaseEntry XgerHalf = { { "default", { 4, 8, 2 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 64, 4, 2 } }, + { "default", { 64, 4, 2 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 64, 1, 2 } }, @@ -101,6 +107,12 @@ const Database::DatabaseEntry XgerSingle = { { "default", { 128, 1, 2 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 128, 1, 2 } }, + { "default", { 128, 1, 2 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 32, 4, 2 } }, @@ -171,6 +183,12 @@ const Database::DatabaseEntry XgerComplexSingle = { { "default", { 128, 2, 2 } }, } }, + { // QUALCOMM GPUs + kDeviceTypeGPU, "QUALCOMM", { + { "QUALCOMM Adreno(TM)", { 64, 1, 4 } }, + { "default", { 64, 1, 4 } }, + } + }, { // Default kDeviceTypeAll, "default", { { "default", { 64, 2, 2 } }, diff --git a/src/kernels/common.opencl b/src/kernels/common.opencl index db4c8ec4..9481881e 100644 --- a/src/kernels/common.opencl +++ b/src/kernels/common.opencl @@ -235,6 +235,15 @@ R"( // ================================================================================================= +// Force inlining functions or not: some compilers don't support the inline keyword +#ifdef USE_INLINE_KEYWORD + #define INLINE_FUNC inline +#else + #define INLINE_FUNC +#endif + +// ================================================================================================= + // Shuffled workgroup indices to avoid partition camping, see below. For specific devices, this is // enabled (see src/routine.cc). #ifndef USE_STAGGERED_INDICES @@ -245,18 +254,18 @@ R"( // http://docs.nvidia.com/cuda/samples/6_Advanced/transpose/doc/MatrixTranspose.pdf // More details: https://github.com/CNugteren/CLBlast/issues/53 #if USE_STAGGERED_INDICES == 1 - inline size_t GetGroupIDFlat() { + INLINE_FUNC size_t GetGroupIDFlat() { return get_group_id(0) + get_num_groups(0) * get_group_id(1); } - inline size_t GetGroupID1() { + INLINE_FUNC size_t GetGroupID1() { return (GetGroupIDFlat()) % get_num_groups(1); } - inline size_t GetGroupID0() { + INLINE_FUNC size_t GetGroupID0() { return ((GetGroupIDFlat() / get_num_groups(1)) + GetGroupID1()) % get_num_groups(0); } #else - inline size_t GetGroupID1() { return get_group_id(1); } - inline size_t GetGroupID0() { return get_group_id(0); } + INLINE_FUNC size_t GetGroupID1() { return get_group_id(1); } + INLINE_FUNC size_t GetGroupID0() { return get_group_id(0); } #endif // ================================================================================================= diff --git a/src/kernels/level1/level1.opencl b/src/kernels/level1/level1.opencl index 7e10426b..3c60c54a 100644 --- a/src/kernels/level1/level1.opencl +++ b/src/kernels/level1/level1.opencl @@ -47,7 +47,7 @@ R"( // ================================================================================================= // The vectorized multiply function -inline realV MultiplyVector(realV cvec, const real aval, const realV bvec) { +INLINE_FUNC realV MultiplyVector(realV cvec, const real aval, const realV bvec) { #if VW == 1 Multiply(cvec, aval, bvec); #elif VW == 2 @@ -89,7 +89,7 @@ inline realV MultiplyVector(realV cvec, const real aval, const realV bvec) { } // The vectorized multiply-add function -inline realV MultiplyAddVector(realV cvec, const real aval, const realV bvec) { +INLINE_FUNC realV MultiplyAddVector(realV cvec, const real aval, const realV bvec) { #if VW == 1 MultiplyAdd(cvec, aval, bvec); #elif VW == 2 diff --git 
a/src/kernels/level2/level2.opencl b/src/kernels/level2/level2.opencl index be979766..505231ca 100644 --- a/src/kernels/level2/level2.opencl +++ b/src/kernels/level2/level2.opencl @@ -33,9 +33,9 @@ R"( // ================================================================================================= // Returns an element from a vector -inline real LoadVector(const int id, const int max, - __global real* gm, const int offset, const int inc, - const int do_conjugate) { +INLINE_FUNC real LoadVector(const int id, const int max, + __global real* gm, const int offset, const int inc, + const int do_conjugate) { if (id < max) { real result = gm[id*inc + offset]; if (do_conjugate) { @@ -53,10 +53,10 @@ inline real LoadVector(const int id, const int max, } // Performs the rank-1 matrix update -inline void MatrixUpdate(const int id1, const int id2, const int max1, const int max2, - __global real* agm, const int a_offset, const int a_ld, - const real alpha, const real xvalue, const real yvalue, - const int is_upper) { +INLINE_FUNC void MatrixUpdate(const int id1, const int id2, const int max1, const int max2, + __global real* agm, const int a_offset, const int a_ld, + const real alpha, const real xvalue, const real yvalue, + const int is_upper) { // Bounds of a regular matrix if (id1 < max1 && id2 < max2) { @@ -100,11 +100,11 @@ inline void MatrixUpdate(const int id1, const int id2, const int max1, const int } // Performs the rank-2 matrix update -inline void MatrixUpdate2(const int id1, const int id2, const int max1, const int max2, - __global real* agm, const int a_offset, const int a_ld, - const real alpha1, const real xvalue, const real yvalue, - const real alpha2, const real xtvalue, const real ytvalue, - const int is_upper) { +INLINE_FUNC void MatrixUpdate2(const int id1, const int id2, const int max1, const int max2, + __global real* agm, const int a_offset, const int a_ld, + const real alpha1, const real xvalue, const real yvalue, + const real alpha2, const real xtvalue, const real ytvalue, + const int is_upper) { // Bounds of a regular matrix if (id1 < max1 && id2 < max2) { diff --git a/src/kernels/level2/xgemv.opencl b/src/kernels/level2/xgemv.opencl index ff011acd..ea0478f0 100644 --- a/src/kernels/level2/xgemv.opencl +++ b/src/kernels/level2/xgemv.opencl @@ -36,9 +36,9 @@ R"( // ================================================================================================= // Defines how to load the input matrix in the non-vectorized case -inline real LoadMatrixA(const __global real* restrict agm, const int x, const int y, - const int a_ld, const int a_offset, const int parameter, - const int kl, const int ku) { +INLINE_FUNC real LoadMatrixA(const __global real* restrict agm, const int x, const int y, + const int a_ld, const int a_offset, const int parameter, + const int kl, const int ku) { real result; // For banded matrices diff --git a/src/kernels/level2/xgemv_fast.opencl b/src/kernels/level2/xgemv_fast.opencl index 02a1f956..8a08f076 100644 --- a/src/kernels/level2/xgemv_fast.opencl +++ b/src/kernels/level2/xgemv_fast.opencl @@ -75,8 +75,8 @@ R"( // ================================================================================================= // Loads a vector input value -inline realVF LoadMatrixAVF(const __global realVF* restrict agm, const int x, const int y, - const int a_ld) { +INLINE_FUNC realVF LoadMatrixAVF(const __global realVF* restrict agm, const int x, const int y, + const int a_ld) { return agm[a_ld*y + x]; } diff --git a/src/kernels/level3/copy_pad.opencl 
b/src/kernels/level3/copy_pad.opencl index 93b89187..6eeadbd1 100644 --- a/src/kernels/level3/copy_pad.opencl +++ b/src/kernels/level3/copy_pad.opencl @@ -24,14 +24,14 @@ R"( // Copies a matrix from source to destination. The output is padded with zero values in case the // destination matrix dimensions are larger than the source matrix dimensions. Additionally, the ld // value and offset can be different. -inline void _CopyPadMatrix(const int src_one, const int src_two, - const int src_ld, const int src_offset, - __global const real* restrict src, - const int dest_one, const int dest_two, - const int dest_ld, const int dest_offset, - __global real* dest, - const real alpha, - const int do_conjugate) { +INLINE_FUNC void _CopyPadMatrix(const int src_one, const int src_two, + const int src_ld, const int src_offset, + __global const real* restrict src, + const int dest_one, const int dest_two, + const int dest_ld, const int dest_offset, + __global real* dest, + const real alpha, + const int do_conjugate) { // Loops over the work per thread in both dimensions #pragma unroll @@ -79,15 +79,15 @@ void CopyPadMatrix(const int src_one, const int src_two, // Same as above, but now un-pads a matrix. This kernel reads data from a padded source matrix, but // writes only the actual data back to the destination matrix. Again, the ld value and offset can // be different. -inline void _CopyMatrix(const int src_one, const int src_two, - const int src_ld, const int src_offset, - __global const real* restrict src, - const int dest_one, const int dest_two, - const int dest_ld, const int dest_offset, - __global real* dest, - const real alpha, - const int upper, const int lower, - const int diagonal_imag_zero) { +INLINE_FUNC void _CopyMatrix(const int src_one, const int src_two, + const int src_ld, const int src_offset, + __global const real* restrict src, + const int dest_one, const int dest_two, + const int dest_ld, const int dest_offset, + __global real* dest, + const real alpha, + const int upper, const int lower, + const int diagonal_imag_zero) { // Loops over the work per thread in both dimensions #pragma unroll diff --git a/src/kernels/level3/invert_diagonal_blocks.opencl b/src/kernels/level3/invert_diagonal_blocks.opencl index 874c1510..93241700 100644 --- a/src/kernels/level3/invert_diagonal_blocks.opencl +++ b/src/kernels/level3/invert_diagonal_blocks.opencl @@ -164,10 +164,10 @@ void InvertDiagonalBlock(int n, __global const real* restrict src, const int src // ================================================================================================= // Triple matrix-multiplication kernel: C = A * B -inline void TripleMatMul(const int size, const bool upper, const int part, __local real* blm, int n, - __global const real* agm, __global const real* bgm, __global real* cgm, - const int lda, const int ldb, const int ldc, - int current_size, int num_pages, const int block_size) { +INLINE_FUNC void TripleMatMul(const int size, const bool upper, const int part, __local real* blm, int n, + __global const real* agm, __global const real* bgm, __global real* cgm, + const int lda, const int ldb, const int ldc, + int current_size, int num_pages, const int block_size) { // Emulates a 3D grid: NX * (NY * num_pages) const int by = get_group_id(1) / num_pages; @@ -250,9 +250,9 @@ inline void TripleMatMul(const int size, const bool upper, const int part, __loc // ================================================================================================= // Triple matrix-multiplication kernel part 1: B12 
= A12 * B22 (upper) or B21 = A21 * B11 (lower) -inline void TripleMatMulPart1(const int size, const bool upper, __local real* blm, int n, - __global const real* src, const int a_offset, const int lda, - __global real* dest, int current_size, int num_pages, const int block_size) { +INLINE_FUNC void TripleMatMulPart1(const int size, const bool upper, __local real* blm, int n, + __global const real* src, const int a_offset, const int lda, + __global real* dest, int current_size, int num_pages, const int block_size) { // Emulates a 3D grid: NX * (NY * num_pages) const int page = get_group_id(1) % num_pages; @@ -286,8 +286,8 @@ inline void TripleMatMulPart1(const int size, const bool upper, __local real* bl } // Triple matrix-multiplication kernel part 1: B12 = -B11 * B12 (upper) or B21 = -B22 * B21 (lower) -inline void TripleMatMulPart2(const int size, const bool upper, __local real* blm, const int n, - __global real* dest, int current_size, int num_pages, const int block_size) { +INLINE_FUNC void TripleMatMulPart2(const int size, const bool upper, __local real* blm, const int n, + __global real* dest, int current_size, int num_pages, const int block_size) { // Emulates a 3D grid: NX * (NY * num_pages) const int page = get_group_id(1) % num_pages; diff --git a/src/kernels/level3/transpose_pad.opencl b/src/kernels/level3/transpose_pad.opencl index fb60ce75..49c5b9a3 100644 --- a/src/kernels/level3/transpose_pad.opencl +++ b/src/kernels/level3/transpose_pad.opencl @@ -24,15 +24,15 @@ R"( // Transposes a matrix from source to destination. The output is padded with zero values in case the // destination matrix dimensions are larger than the transposed source matrix dimensions. -inline void _TransposePadMatrix(__local real* tile, - const int src_one, const int src_two, - const int src_ld, const int src_offset, - __global const real* restrict src, - const int dest_one, const int dest_two, - const int dest_ld, const int dest_offset, - __global real* dest, - const real alpha, - const int do_conjugate) { +INLINE_FUNC void _TransposePadMatrix(__local real* tile, + const int src_one, const int src_two, + const int src_ld, const int src_offset, + __global const real* restrict src, + const int dest_one, const int dest_two, + const int dest_ld, const int dest_offset, + __global real* dest, + const real alpha, + const int do_conjugate) { // Loop over the work per thread #pragma unroll @@ -105,16 +105,16 @@ void TransposePadMatrix(const int src_one, const int src_two, // Transposes a matrix, while considering possible padding in the source matrix. Data is read from a // padded source matrix, but only the actual data is written back to the transposed destination // matrix. This kernel optionally checks for upper/lower triangular matrices. 
-inline void _TransposeMatrix(__local real* tile, - const int src_one, const int src_two, - const int src_ld, const int src_offset, - __global const real* restrict src, - const int dest_one, const int dest_two, - const int dest_ld, const int dest_offset, - __global real* dest, - const real alpha, - const int upper, const int lower, - const int diagonal_imag_zero) { +INLINE_FUNC void _TransposeMatrix(__local real* tile, + const int src_one, const int src_two, + const int src_ld, const int src_offset, + __global const real* restrict src, + const int dest_one, const int dest_two, + const int dest_ld, const int dest_offset, + __global real* dest, + const real alpha, + const int upper, const int lower, + const int diagonal_imag_zero) { // Loop over the work per thread #pragma unroll diff --git a/src/kernels/level3/xgemm_direct_part1.opencl b/src/kernels/level3/xgemm_direct_part1.opencl index a8bd450e..8b650589 100644 --- a/src/kernels/level3/xgemm_direct_part1.opencl +++ b/src/kernels/level3/xgemm_direct_part1.opencl @@ -93,7 +93,7 @@ R"( // ================================================================================================= // Initializes the accumulation registers to zero -inline void InitAccRegistersDirect(real cpm[NWID][MWID]) { +INLINE_FUNC void InitAccRegistersDirect(real cpm[NWID][MWID]) { #pragma unroll for (int mi=0; mi<MWID; ++mi) { #pragma unroll @@ -106,7 +106,7 @@ inline void InitAccRegistersDirect(real cpm[NWID][MWID]) { // ================================================================================================= // Performs the actual computation: Cpm += Apm * Bpm -inline void MultiplyAccumulateDirect(real cpm[NWID][MWID], real apm[MWID], real bpm[NWID]) { +INLINE_FUNC void MultiplyAccumulateDirect(real cpm[NWID][MWID], real apm[MWID], real bpm[NWID]) { #pragma unroll for (int ni=0; ni<NWID; ++ni) { #pragma unroll @@ -120,9 +120,9 @@ inline void MultiplyAccumulateDirect(real cpm[NWID][MWID], real apm[MWID], real // Loads global off-chip memory into thread-private register files. This function is specific for // loading the A input matrix. -inline void GlobalToPrivateDirectA(const __global real* restrict agms, real apm[MWID], - const int a_ld, const int a_offset, const int idm, const int idk, - const int a_transpose, const int a_conjugate) { +INLINE_FUNC void GlobalToPrivateDirectA(const __global real* restrict agms, real apm[MWID], + const int a_ld, const int a_offset, const int idm, const int idk, + const int a_transpose, const int a_conjugate) { #pragma unroll for (int mi=0; mi<MWID; ++mi) { const int a_index = (a_transpose) ? (idm + mi)*a_ld + idk : idk*a_ld + (idm + mi); @@ -132,9 +132,9 @@ inline void GlobalToPrivateDirectA(const __global real* restrict agms, real apm[ } // Same as above, but now for the B input matrix -inline void GlobalToPrivateDirectB(const __global real* restrict bgms, real bpm[NWID], - const int b_ld, const int b_offset, const int idn, const int idk, - const int b_transpose, const int b_conjugate) { +INLINE_FUNC void GlobalToPrivateDirectB(const __global real* restrict bgms, real bpm[NWID], + const int b_ld, const int b_offset, const int idn, const int idk, + const int b_transpose, const int b_conjugate) { #pragma unroll for (int ni=0; ni<NWID; ++ni) { const int b_index = (b_transpose) ? (idn + ni)*b_ld + idk : idk*b_ld + (idn + ni); @@ -145,10 +145,10 @@ inline void GlobalToPrivateDirectB(const __global real* restrict bgms, real bpm[ // Loads global off-chip memory into thread-private register files. 
This function is specific for // loading the A input matrix. This is the same as above but now includes a bounds check. -inline void GlobalToPrivateCheckedA(const __global real* restrict agms, real apm[MWID], - const int a_ld, const int a_offset, const int idm, const int idk, - const int a_transpose, const int a_conjugate, - const int kSizeM) { +INLINE_FUNC void GlobalToPrivateCheckedA(const __global real* restrict agms, real apm[MWID], + const int a_ld, const int a_offset, const int idm, const int idk, + const int a_transpose, const int a_conjugate, + const int kSizeM) { #pragma unroll for (int mi=0; mi<MWID; ++mi) { if (idm + mi < kSizeM) { @@ -163,10 +163,10 @@ inline void GlobalToPrivateCheckedA(const __global real* restrict agms, real apm } // Same as above, but now for the B input matrix -inline void GlobalToPrivateCheckedB(const __global real* restrict bgms, real bpm[NWID], - const int b_ld, const int b_offset, const int idn, const int idk, - const int b_transpose, const int b_conjugate, - const int kSizeN) { +INLINE_FUNC void GlobalToPrivateCheckedB(const __global real* restrict bgms, real bpm[NWID], + const int b_ld, const int b_offset, const int idn, const int idk, + const int b_transpose, const int b_conjugate, + const int kSizeN) { #pragma unroll for (int ni=0; ni<NWID; ++ni) { if (idn + ni < kSizeN) { @@ -184,8 +184,8 @@ inline void GlobalToPrivateCheckedB(const __global real* restrict bgms, real bpm // Caches on-chip local memory into per-thread private memory (registers). This function is specific // for caching the A input matrix. -inline void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg, - const int a_transpose) { +INLINE_FUNC void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg, + const int a_transpose) { #pragma unroll for (int mi=0; mi<MWID; ++mi) { const int mg = mi + get_local_id(0)*MWID; @@ -195,8 +195,8 @@ inline void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int k } // Same as above, but now for the B input matrix -inline void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int kg, - const int b_transpose) { +INLINE_FUNC void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int kg, + const int b_transpose) { #pragma unroll for (int ni=0; ni<NWID; ++ni) { const int ng = ni + get_local_id(1)*NWID; @@ -209,10 +209,10 @@ inline void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int k // Merges the results in Cpm with the global array in Cgm. This also performs the multiplication // with the constants: Cgm = alpha*A*B + beta*Cgm = alpha*Cpm + beta*Cgm -inline void StoreResultsDirect(__global real* cgm, real cpm[NWID][MWID], - const int idm, const int idn, - const real alpha, const real beta, - const int c_ld, const int c_offset, const int c_transpose) { +INLINE_FUNC void StoreResultsDirect(__global real* cgm, real cpm[NWID][MWID], + const int idm, const int idn, + const real alpha, const real beta, + const int c_ld, const int c_offset, const int c_transpose) { #pragma unroll for (int ni=0; ni<NWID; ++ni) { #pragma unroll @@ -237,10 +237,10 @@ inline void StoreResultsDirect(__global real* cgm, real cpm[NWID][MWID], // Merges the results in Cpm with the global array in Cgm. 
This also performs the multiplication // with the constants: Cgm = alpha*A*B + beta*Cgm = alpha*Cpm + beta*Cgm -inline void StoreResultsChecked(__global real* cgm, real cpm[NWID][MWID], - const int idm, const int idn, const int kSizeM, const int kSizeN, - const real alpha, const real beta, - const int c_ld, const int c_offset, const int c_transpose) { +INLINE_FUNC void StoreResultsChecked(__global real* cgm, real cpm[NWID][MWID], + const int idm, const int idn, const int kSizeM, const int kSizeN, + const real alpha, const real beta, + const int c_ld, const int c_offset, const int c_transpose) { #pragma unroll for (int ni=0; ni<NWID; ++ni) { #pragma unroll diff --git a/src/kernels/level3/xgemm_direct_part2.opencl b/src/kernels/level3/xgemm_direct_part2.opencl index 3af14bff..1d9330fc 100644 --- a/src/kernels/level3/xgemm_direct_part2.opencl +++ b/src/kernels/level3/xgemm_direct_part2.opencl @@ -19,9 +19,9 @@ R"( // Caches global off-chip memory into local (shared) memory on-chip. This function is specific for // caching the A input matrix. -inline void GlobalToLocalDirectA(const __global realMD* restrict agm, __local real* alm, - const int a_ld, const int a_offset, const int kwg, - const int a_transpose, const int a_conjugate) { +INLINE_FUNC void GlobalToLocalDirectA(const __global realMD* restrict agm, __local real* alm, + const int a_ld, const int a_offset, const int kwg, + const int a_transpose, const int a_conjugate) { #if MDIMCD == MDIMAD const int la0 = get_local_id(0); const int la1 = get_local_id(1); @@ -90,9 +90,9 @@ inline void GlobalToLocalDirectA(const __global realMD* restrict agm, __local re } // Same as above, but now for the B input matrix -inline void GlobalToLocalDirectB(const __global realND* restrict bgm, __local real* blm, - const int b_ld, const int b_offset, const int kwg, - const int b_transpose, const int b_conjugate) { +INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, __local real* blm, + const int b_ld, const int b_offset, const int kwg, + const int b_transpose, const int b_conjugate) { #if MDIMCD == NDIMBD const int lb0 = get_local_id(0); const int lb1 = get_local_id(1); @@ -165,9 +165,9 @@ inline void GlobalToLocalDirectB(const __global realND* restrict bgm, __local re // Caches global off-chip memory into local (shared) memory on-chip. This function is specific for // caching the A input matrix. In contrast to the functions above, this function performs doesn't // use the vector data-types. 
-inline void GlobalToLocalScalarA(const __global real* restrict agms, __local real* alm, - const int a_ld, const int a_offset, const int kwg, - const int a_transpose, const int a_conjugate) { +INLINE_FUNC void GlobalToLocalScalarA(const __global real* restrict agms, __local real* alm, + const int a_ld, const int a_offset, const int kwg, + const int a_transpose, const int a_conjugate) { #if MDIMCD == MDIMAD const int la0 = get_local_id(0); const int la1 = get_local_id(1); @@ -196,9 +196,9 @@ inline void GlobalToLocalScalarA(const __global real* restrict agms, __local rea } // Same as above, but now for the B input matrix -inline void GlobalToLocalScalarB(const __global real* restrict bgms, __local real* blm, - const int b_ld, const int b_offset, const int kwg, - const int b_transpose, const int b_conjugate) { +INLINE_FUNC void GlobalToLocalScalarB(const __global real* restrict bgms, __local real* blm, + const int b_ld, const int b_offset, const int kwg, + const int b_transpose, const int b_conjugate) { #if MDIMCD == NDIMBD const int lb0 = get_local_id(0); const int lb1 = get_local_id(1); @@ -231,10 +231,10 @@ inline void GlobalToLocalScalarB(const __global real* restrict bgms, __local rea // Caches global off-chip memory into local (shared) memory on-chip. This function is specific for // caching the A input matrix. In contrast to the functions above, this function performs bounds // checks and doesn't use the vector data-types. -inline void GlobalToLocalCheckedA(const __global real* restrict agms, __local real* alm, - const int a_ld, const int a_offset, const int kwg, - const int a_transpose, const int a_conjugate, - const int kSizeM, const int kSizeK) { +INLINE_FUNC void GlobalToLocalCheckedA(const __global real* restrict agms, __local real* alm, + const int a_ld, const int a_offset, const int kwg, + const int a_transpose, const int a_conjugate, + const int kSizeM, const int kSizeK) { #if MDIMCD == MDIMAD const int la0 = get_local_id(0); const int la1 = get_local_id(1); @@ -270,10 +270,10 @@ inline void GlobalToLocalCheckedA(const __global real* restrict agms, __local re } // Same as above, but now for the B input matrix -inline void GlobalToLocalCheckedB(const __global real* restrict bgms, __local real* blm, - const int b_ld, const int b_offset, const int kwg, - const int b_transpose, const int b_conjugate, - const int kSizeN, const int kSizeK) { +INLINE_FUNC void GlobalToLocalCheckedB(const __global real* restrict bgms, __local real* blm, + const int b_ld, const int b_offset, const int kwg, + const int b_transpose, const int b_conjugate, + const int kSizeN, const int kSizeK) { #if MDIMCD == NDIMBD const int lb0 = get_local_id(0); const int lb1 = get_local_id(1); diff --git a/src/kernels/level3/xgemm_direct_part3.opencl b/src/kernels/level3/xgemm_direct_part3.opencl index c04cdeb8..b0beb614 100644 --- a/src/kernels/level3/xgemm_direct_part3.opencl +++ b/src/kernels/level3/xgemm_direct_part3.opencl @@ -18,15 +18,15 @@ R"( // ================================================================================================= // Main body of the kernel. This is the direct version without pre/post processing and restrictions. 
-inline void XgemmDirect(const int kSizeM, const int kSizeN, const int kSizeK, - const real_arg arg_alpha, - const real_arg arg_beta, - const __global realMD* restrict agm, const int a_offset, const int a_ld, - const __global realND* restrict bgm, const int b_offset, const int b_ld, - __global real* cgm, const int c_offset, const int c_ld, - __local real* alm, __local real* blm, - const int a_transpose, const int b_transpose, const int c_transpose, - const int a_conjugate, const int b_conjugate) { +INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSizeK, + const real_arg arg_alpha, + const real_arg arg_beta, + const __global realMD* restrict agm, const int a_offset, const int a_ld, + const __global realND* restrict bgm, const int b_offset, const int b_ld, + __global real* cgm, const int c_offset, const int c_ld, + __local real* alm, __local real* blm, + const int a_transpose, const int b_transpose, const int c_transpose, + const int a_conjugate, const int b_conjugate) { const real alpha = GetRealArg(arg_alpha); const real beta = GetRealArg(arg_beta); diff --git a/src/kernels/level3/xgemm_part1.opencl b/src/kernels/level3/xgemm_part1.opencl index d0ce06ad..07dafe13 100644 --- a/src/kernels/level3/xgemm_part1.opencl +++ b/src/kernels/level3/xgemm_part1.opencl @@ -135,7 +135,7 @@ R"( // ================================================================================================= // Initializes the accumulation registers to zero -inline void InitAccRegisters(realM cpm[NWI][MWI/VWM]) { +INLINE_FUNC void InitAccRegisters(realM cpm[NWI][MWI/VWM]) { #pragma unroll for (int mi=0; mi<MWI/VWM; ++mi) { #pragma unroll @@ -186,8 +186,8 @@ inline void InitAccRegisters(realM cpm[NWI][MWI/VWM]) { // Caches global off-chip memory into local (shared) memory on-chip. This function is specific for // caching the A input matrix. #if SA == 1 -inline void GlobalToLocalA(const __global realM* restrict agm, __local realM* alm, - const int kSizeM, const int tid, const int kwg) { +INLINE_FUNC void GlobalToLocalA(const __global realM* restrict agm, __local realM* alm, + const int kSizeM, const int tid, const int kwg) { const int la0 = tid % MDIMA; const int la1 = tid / MDIMA; #pragma unroll @@ -216,8 +216,8 @@ inline void GlobalToLocalA(const __global realM* restrict agm, __local realM* al // Same as above, but now for the B input matrix #if SB == 1 -inline void GlobalToLocalB(const __global realN* restrict bgm, __local realN* blm, - const int kSizeN, const int tid, const int kwg) { +INLINE_FUNC void GlobalToLocalB(const __global realN* restrict bgm, __local realN* blm, + const int kSizeN, const int tid, const int kwg) { const int lb0 = tid % NDIMB; const int lb1 = tid / NDIMB; #pragma unroll @@ -249,8 +249,8 @@ inline void GlobalToLocalB(const __global realN* restrict bgm, __local realN* bl // Caches global off-chip memory directly into per-thread private memory (registers). This function // is specific for caching the A input matrix. 
#if SA == 0 -inline void GlobalToPrivateA(const __global realM* restrict agm, realM apm[MWI/VWM], - const int kSizeM, const int idk, const int kwg) { +INLINE_FUNC void GlobalToPrivateA(const __global realM* restrict agm, realM apm[MWI/VWM], + const int kSizeM, const int idk, const int kwg) { #pragma unroll for (int mi=0; mi<MWI/VWM; ++mi) { @@ -272,8 +272,8 @@ inline void GlobalToPrivateA(const __global realM* restrict agm, realM apm[MWI/V // Same as above, but now for the B input matrix #if SB == 0 -inline void GlobalToPrivateB(const __global realN* restrict bgm, realN bpm[NWI/VWN], - const int kSizeN, const int idk) { +INLINE_FUNC void GlobalToPrivateB(const __global realN* restrict bgm, realN bpm[NWI/VWN], + const int kSizeN, const int idk) { #pragma unroll for (int ni=0; ni<NWI/VWN; ++ni) { @@ -298,7 +298,7 @@ inline void GlobalToPrivateB(const __global realN* restrict bgm, realN bpm[NWI/V // Caches on-chip local memory into per-thread private memory (registers). This function is specific // for caching the A input matrix. #if SA == 1 -inline void LocalToPrivateA(__local realM* alm, realM apm[MWI/VWM], const int kg) { +INLINE_FUNC void LocalToPrivateA(__local realM* alm, realM apm[MWI/VWM], const int kg) { #pragma unroll for (int mi=0; mi<MWI/VWM; ++mi) { #if STRM == 0 @@ -313,7 +313,7 @@ inline void LocalToPrivateA(__local realM* alm, realM apm[MWI/VWM], const int kg // Same as above, but now for the B input matrix #if SB == 1 -inline void LocalToPrivateB(__local realN* blm, realN bpm[NWI/VWN], const int kg) { +INLINE_FUNC void LocalToPrivateB(__local realN* blm, realN bpm[NWI/VWN], const int kg) { #pragma unroll for (int ni=0; ni<NWI/VWN; ++ni) { #if STRN == 0 diff --git a/src/kernels/level3/xgemm_part2.opencl b/src/kernels/level3/xgemm_part2.opencl index e8234a29..06fafc8f 100644 --- a/src/kernels/level3/xgemm_part2.opencl +++ b/src/kernels/level3/xgemm_part2.opencl @@ -18,7 +18,7 @@ R"( // ================================================================================================= // The vectorised multiply-add function -inline realM MultiplyAddVector(realM cvec, const realM avec, const real bval) { +INLINE_FUNC realM MultiplyAddVector(realM cvec, const realM avec, const real bval) { #if USE_VECTOR_MAD == 1 cvec += avec * bval; #else @@ -64,7 +64,7 @@ inline realM MultiplyAddVector(realM cvec, const realM avec, const real bval) { } // Performs the actual computation: Cpm += Apm * Bpm -inline void MultiplyAccumulate(realM cpm[NWI][MWI/VWM], realM apm[MWI/VWM], realN bpm[NWI/VWN]) { +INLINE_FUNC void MultiplyAccumulate(realM cpm[NWI][MWI/VWM], realM apm[MWI/VWM], realN bpm[NWI/VWN]) { #pragma unroll for (int ni=0; ni<NWI/VWN; ++ni) { #pragma unroll @@ -115,8 +115,8 @@ inline void MultiplyAccumulate(realM cpm[NWI][MWI/VWM], realM apm[MWI/VWM], real // Merges the results in Cpm with the global array in Cgm. 
This also performs the multiplication // with the constants: Cgm = alpha*A*B + beta*Cgm = alpha*Cpm + beta*Cgm -inline void StoreResults(__global realM* cgm, realM cpm[NWI][MWI/VWM], const int kSizeM, - const real alpha, const real beta) { +INLINE_FUNC void StoreResults(__global realM* cgm, realM cpm[NWI][MWI/VWM], const int kSizeM, + const real alpha, const real beta) { #pragma unroll for (int ni=0; ni<NWI; ++ni) { #pragma unroll diff --git a/src/kernels/level3/xgemm_part3.opencl b/src/kernels/level3/xgemm_part3.opencl index 8ac3a3a8..3f0d590d 100644 --- a/src/kernels/level3/xgemm_part3.opencl +++ b/src/kernels/level3/xgemm_part3.opencl @@ -18,17 +18,17 @@ R"( // ================================================================================================= // Main body of the matrix-multiplication algorithm. It calls the (inlined) functions above. -inline void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK, - const __global realM* restrict agm, const __global realN* restrict bgm, - __global realM* cgm, realM cpm[NWI][MWI/VWM] - #if SA == 1 && SB == 1 - , __local realM* alm, __local realN* blm - #elif SA == 1 - , __local realM* alm - #elif SB == 1 - , __local realN* blm - #endif - ) { +INLINE_FUNC void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK, + const __global realM* restrict agm, const __global realN* restrict bgm, + __global realM* cgm, realM cpm[NWI][MWI/VWM] + #if SA == 1 && SB == 1 + , __local realM* alm, __local realN* blm + #elif SA == 1 + , __local realM* alm + #elif SB == 1 + , __local realN* blm + #endif + ) { // Allocates workitem-private memory (registers) realM apm[MWI/VWM]; diff --git a/src/routine.cpp b/src/routine.cpp index 81baa590..7d4ed76f 100644 --- a/src/routine.cpp +++ b/src/routine.cpp @@ -135,7 +135,13 @@ void Routine::InitProgram(std::initializer_list<const char *> source) { // Adds the name of the routine as a define source_string += "#define ROUTINE_"+routine_name_+"\n"; - // For specific devices, use the non-IEE754 compilant OpenCL mad() instruction. This can improve + // Not all OpenCL compilers support the 'inline' keyword. The keyword is only used for devices on + // which it is known to work with all OpenCL platforms. + if (device_.IsNVIDIA() || device_.IsARM()) { + source_string += "#define USE_INLINE_KEYWORD 1\n"; + } + + // For specific devices, use the non-IEE754 compliant OpenCL mad() instruction. This can improve // performance, but might result in a reduced accuracy. 
if (device_.IsAMD() && device_.IsGPU()) { source_string += "#define USE_CL_MAD 1\n"; diff --git a/src/routines/common.cpp b/src/routines/common.cpp index c995dc12..5b178e53 100644 --- a/src/routines/common.cpp +++ b/src/routines/common.cpp @@ -73,4 +73,79 @@ void RunKernel(Kernel &kernel, Queue &queue, const Device &device, } // ================================================================================================= + +// Sets all elements of a matrix to a constant value +template <typename T> +void FillMatrix(Queue &queue, const Device &device, + const Program &program, const Databases &, + EventPointer event, const std::vector<Event> &waitForEvents, + const size_t m, const size_t n, const size_t ld, const size_t offset, + const Buffer<T> &dest, + const T constant_value) { + auto kernel = Kernel(program, "FillMatrix"); + kernel.SetArgument(0, static_cast<int>(m)); + kernel.SetArgument(1, static_cast<int>(n)); + kernel.SetArgument(2, static_cast<int>(ld)); + kernel.SetArgument(3, static_cast<int>(offset)); + kernel.SetArgument(4, dest()); + kernel.SetArgument(5, GetRealArg(constant_value)); + auto local = std::vector<size_t>{8, 8}; + auto global = std::vector<size_t>{Ceil(m, 8), Ceil(n, 8)}; + RunKernel(kernel, queue, device, global, local, event, waitForEvents); +} + +// Compiles the above function +template void FillMatrix<half>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const size_t, const Buffer<half>&, const half); +template void FillMatrix<float>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const size_t, const Buffer<float>&, const float); +template void FillMatrix<double>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const size_t, const Buffer<double>&, const double); +template void FillMatrix<float2>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const size_t, const Buffer<float2>&, const float2); +template void FillMatrix<double2>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const size_t, const Buffer<double2>&, const double2); + +// Sets all elements of a vector to a constant value +template <typename T> +void FillVector(Queue &queue, const Device &device, + const Program &program, const Databases &, + EventPointer event, const std::vector<Event> &waitForEvents, + const size_t n, const size_t inc, const size_t offset, + const Buffer<T> &dest, + const T constant_value) { + auto kernel = Kernel(program, "FillVector"); + kernel.SetArgument(0, static_cast<int>(n)); + kernel.SetArgument(1, static_cast<int>(inc)); + kernel.SetArgument(2, static_cast<int>(offset)); + kernel.SetArgument(3, dest()); + kernel.SetArgument(4, GetRealArg(constant_value)); + auto local = std::vector<size_t>{64}; + auto global = std::vector<size_t>{Ceil(n, 64)}; + RunKernel(kernel, queue, device, global, local, event, waitForEvents); +} + +// Compiles the above function +template void FillVector<half>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const Buffer<half>&, const half); +template void FillVector<float>(Queue&, 
const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const Buffer<float>&, const float); +template void FillVector<double>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const Buffer<double>&, const double); +template void FillVector<float2>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const Buffer<float2>&, const float2); +template void FillVector<double2>(Queue&, const Device&, const Program&, const Databases&, + EventPointer, const std::vector<Event>&, const size_t, const size_t, + const size_t, const Buffer<double2>&, const double2); + +// ================================================================================================= } // namespace clblast diff --git a/src/routines/common.hpp b/src/routines/common.hpp index 28a43da5..84ccd9d2 100644 --- a/src/routines/common.hpp +++ b/src/routines/common.hpp @@ -40,18 +40,7 @@ void FillMatrix(Queue &queue, const Device &device, EventPointer event, const std::vector<Event> &waitForEvents, const size_t m, const size_t n, const size_t ld, const size_t offset, const Buffer<T> &dest, - const T constant_value) { - auto kernel = Kernel(program, "FillMatrix"); - kernel.SetArgument(0, static_cast<int>(m)); - kernel.SetArgument(1, static_cast<int>(n)); - kernel.SetArgument(2, static_cast<int>(ld)); - kernel.SetArgument(3, static_cast<int>(offset)); - kernel.SetArgument(4, dest()); - kernel.SetArgument(5, GetRealArg(constant_value)); - auto local = std::vector<size_t>{8, 8}; - auto global = std::vector<size_t>{Ceil(m, 8), Ceil(n, 8)}; - RunKernel(kernel, queue, device, global, local, event, waitForEvents); -} + const T constant_value); // Sets all elements of a vector to a constant value template <typename T> @@ -60,17 +49,7 @@ void FillVector(Queue &queue, const Device &device, EventPointer event, const std::vector<Event> &waitForEvents, const size_t n, const size_t inc, const size_t offset, const Buffer<T> &dest, - const T constant_value) { - auto kernel = Kernel(program, "FillVector"); - kernel.SetArgument(0, static_cast<int>(n)); - kernel.SetArgument(1, static_cast<int>(inc)); - kernel.SetArgument(2, static_cast<int>(offset)); - kernel.SetArgument(3, dest()); - kernel.SetArgument(4, GetRealArg(constant_value)); - auto local = std::vector<size_t>{64}; - auto global = std::vector<size_t>{Ceil(n, 64)}; - RunKernel(kernel, queue, device, global, local, event, waitForEvents); -} + const T constant_value); // ================================================================================================= diff --git a/src/routines/level3/xgemm.cpp b/src/routines/level3/xgemm.cpp index 4c8e0f79..3909c308 100644 --- a/src/routines/level3/xgemm.cpp +++ b/src/routines/level3/xgemm.cpp @@ -283,8 +283,10 @@ void Xgemm<T>::GemmDirect(const size_t m, const size_t n, const size_t k, const auto m_ceiled = Ceil(m, db_["WGD"]); const auto n_ceiled = Ceil(n, db_["WGD"]); const auto global = std::vector<size_t>{ - (m_ceiled * db_["MDIMCD"]) / db_["WGD"], - (n_ceiled * db_["NDIMCD"]) / db_["WGD"] + // CeilDiv(m * db_["MDIMCD"], db_["WGD"]), + // CeilDiv(n * db_["NDIMCD"], db_["WGD"]) + (m_ceiled * db_["MDIMCD"]) / db_["WGD"], + (n_ceiled * db_["NDIMCD"]) / db_["WGD"] }; const auto local = std::vector<size_t>{db_["MDIMCD"], db_["NDIMCD"]}; diff --git a/src/routines/levelx/xgemmbatched.cpp 
b/src/routines/levelx/xgemmbatched.cpp index 0fea1922..ee8448d2 100644 --- a/src/routines/levelx/xgemmbatched.cpp +++ b/src/routines/levelx/xgemmbatched.cpp @@ -94,8 +94,8 @@ void XgemmBatched<T>::DoGemmBatched(const Layout layout, const Transpose a_trans // Tests the matrices for validity for (auto batch = size_t{0}; batch < batch_count; ++batch) { - TestMatrixA(a_one, a_two, a_buffer, a_offsets[batch], a_ld); - TestMatrixB(b_one, b_two, b_buffer, b_offsets[batch], b_ld); + TestMatrixA(a_one, a_two, a_buffer, a_offsets[batch], a_ld, false); // don't test for invalid LD + TestMatrixB(b_one, b_two, b_buffer, b_offsets[batch], b_ld, false); // don't test for invalid LD TestMatrixC(c_one, c_two, c_buffer, c_offsets[batch], c_ld); } diff --git a/src/utilities/buffer_test.hpp b/src/utilities/buffer_test.hpp index 652ab8c6..b5693181 100644 --- a/src/utilities/buffer_test.hpp +++ b/src/utilities/buffer_test.hpp @@ -23,8 +23,8 @@ namespace clblast { // Tests matrix 'A' for validity template <typename T> void TestMatrixA(const size_t one, const size_t two, const Buffer<T> &buffer, - const size_t offset, const size_t ld) { - if (ld < one) { throw BLASError(StatusCode::kInvalidLeadDimA); } + const size_t offset, const size_t ld, const bool test_lead_dim = true) { + if (test_lead_dim && ld < one) { throw BLASError(StatusCode::kInvalidLeadDimA); } try { const auto required_size = (ld * (two - 1) + one + offset) * sizeof(T); if (buffer.GetSize() < required_size) { throw BLASError(StatusCode::kInsufficientMemoryA); } @@ -34,8 +34,8 @@ void TestMatrixA(const size_t one, const size_t two, const Buffer<T> &buffer, // Tests matrix 'B' for validity template <typename T> void TestMatrixB(const size_t one, const size_t two, const Buffer<T> &buffer, - const size_t offset, const size_t ld) { - if (ld < one) { throw BLASError(StatusCode::kInvalidLeadDimB); } + const size_t offset, const size_t ld, const bool test_lead_dim = true) { + if (test_lead_dim && ld < one) { throw BLASError(StatusCode::kInvalidLeadDimB); } try { const auto required_size = (ld * (two - 1) + one + offset) * sizeof(T); if (buffer.GetSize() < required_size) { throw BLASError(StatusCode::kInsufficientMemoryB); } diff --git a/src/utilities/utilities.cpp b/src/utilities/utilities.cpp index 95b70cd5..0cd00438 100644 --- a/src/utilities/utilities.cpp +++ b/src/utilities/utilities.cpp @@ -7,7 +7,7 @@ // Author(s): // Cedric Nugteren <www.cedricnugteren.nl> // -// This file implements the common (test) utility functions. +// This file implements the common utility functions. 
// // ================================================================================================= @@ -85,14 +85,6 @@ template <> double AbsoluteValue(const double2 value) { return std::sqrt(value.real() * value.real() + value.imag() * value.imag()); } -// Returns whether a scalar is close to zero -template <typename T> bool IsCloseToZero(const T value) { return (value > -SmallConstant<T>()) && (value < SmallConstant<T>()); } -template bool IsCloseToZero<float>(const float); -template bool IsCloseToZero<double>(const double); -template <> bool IsCloseToZero(const half value) { return IsCloseToZero(HalfToFloat(value)); } -template <> bool IsCloseToZero(const float2 value) { return IsCloseToZero(value.real()) || IsCloseToZero(value.imag()); } -template <> bool IsCloseToZero(const double2 value) { return IsCloseToZero(value.real()) || IsCloseToZero(value.imag()); } - // ================================================================================================= // Implements the string conversion using std::to_string if possible @@ -319,12 +311,6 @@ bool CheckArgument(const std::vector<std::string> &arguments, std::string &help, // ================================================================================================= -// Returns a random seed. This used to be implemented using 'std::random_device', but that doesn't -// always work. The chrono-timers are more reliable in that sense, but perhaps less random. -unsigned int GetRandomSeed() { - return static_cast<unsigned int>(std::chrono::system_clock::now().time_since_epoch().count()); -} - // Create a random number generator and populates a vector with samples from a random distribution template <typename T> void PopulateVector(std::vector<T> &vector, std::mt19937 &mt, std::uniform_real_distribution<double> &dist) { @@ -354,87 +340,6 @@ void PopulateVector(std::vector<half> &vector, std::mt19937 &mt, std::uniform_re // ================================================================================================= -template <typename T, typename U> -void DeviceToHost(const Arguments<U> &args, Buffers<T> &buffers, BuffersHost<T> &buffers_host, - Queue &queue, const std::vector<std::string> &names) { - for (auto &name: names) { - if (name == kBufVecX) {buffers_host.x_vec = std::vector<T>(args.x_size, static_cast<T>(0)); buffers.x_vec.Read(queue, args.x_size, buffers_host.x_vec); } - else if (name == kBufVecY) { buffers_host.y_vec = std::vector<T>(args.y_size, static_cast<T>(0)); buffers.y_vec.Read(queue, args.y_size, buffers_host.y_vec); } - else if (name == kBufMatA) { buffers_host.a_mat = std::vector<T>(args.a_size, static_cast<T>(0)); buffers.a_mat.Read(queue, args.a_size, buffers_host.a_mat); } - else if (name == kBufMatB) { buffers_host.b_mat = std::vector<T>(args.b_size, static_cast<T>(0)); buffers.b_mat.Read(queue, args.b_size, buffers_host.b_mat); } - else if (name == kBufMatC) { buffers_host.c_mat = std::vector<T>(args.c_size, static_cast<T>(0)); buffers.c_mat.Read(queue, args.c_size, buffers_host.c_mat); } - else if (name == kBufMatAP) { buffers_host.ap_mat = std::vector<T>(args.ap_size, static_cast<T>(0)); buffers.ap_mat.Read(queue, args.ap_size, buffers_host.ap_mat); } - else if (name == kBufScalar) { buffers_host.scalar = std::vector<T>(args.scalar_size, static_cast<T>(0)); buffers.scalar.Read(queue, args.scalar_size, buffers_host.scalar); } - else { throw std::runtime_error("Invalid buffer name"); } - } -} - -template <typename T, typename U> -void HostToDevice(const Arguments<U> &args, Buffers<T> &buffers, 
@@ -354,87 +340,6 @@ void PopulateVector(std::vector<half> &vector, std::mt19937 &mt, std::uniform_re
 
 // =================================================================================================
 
-template <typename T, typename U>
-void DeviceToHost(const Arguments<U> &args, Buffers<T> &buffers, BuffersHost<T> &buffers_host,
-                  Queue &queue, const std::vector<std::string> &names) {
-  for (auto &name: names) {
-    if (name == kBufVecX) {buffers_host.x_vec = std::vector<T>(args.x_size, static_cast<T>(0)); buffers.x_vec.Read(queue, args.x_size, buffers_host.x_vec); }
-    else if (name == kBufVecY) { buffers_host.y_vec = std::vector<T>(args.y_size, static_cast<T>(0)); buffers.y_vec.Read(queue, args.y_size, buffers_host.y_vec); }
-    else if (name == kBufMatA) { buffers_host.a_mat = std::vector<T>(args.a_size, static_cast<T>(0)); buffers.a_mat.Read(queue, args.a_size, buffers_host.a_mat); }
-    else if (name == kBufMatB) { buffers_host.b_mat = std::vector<T>(args.b_size, static_cast<T>(0)); buffers.b_mat.Read(queue, args.b_size, buffers_host.b_mat); }
-    else if (name == kBufMatC) { buffers_host.c_mat = std::vector<T>(args.c_size, static_cast<T>(0)); buffers.c_mat.Read(queue, args.c_size, buffers_host.c_mat); }
-    else if (name == kBufMatAP) { buffers_host.ap_mat = std::vector<T>(args.ap_size, static_cast<T>(0)); buffers.ap_mat.Read(queue, args.ap_size, buffers_host.ap_mat); }
-    else if (name == kBufScalar) { buffers_host.scalar = std::vector<T>(args.scalar_size, static_cast<T>(0)); buffers.scalar.Read(queue, args.scalar_size, buffers_host.scalar); }
-    else { throw std::runtime_error("Invalid buffer name"); }
-  }
-}
-
-template <typename T, typename U>
-void HostToDevice(const Arguments<U> &args, Buffers<T> &buffers, BuffersHost<T> &buffers_host,
-                  Queue &queue, const std::vector<std::string> &names) {
-  for (auto &name: names) {
-    if (name == kBufVecX) { buffers.x_vec.Write(queue, args.x_size, buffers_host.x_vec); }
-    else if (name == kBufVecY) { buffers.y_vec.Write(queue, args.y_size, buffers_host.y_vec); }
-    else if (name == kBufMatA) { buffers.a_mat.Write(queue, args.a_size, buffers_host.a_mat); }
-    else if (name == kBufMatB) { buffers.b_mat.Write(queue, args.b_size, buffers_host.b_mat); }
-    else if (name == kBufMatC) { buffers.c_mat.Write(queue, args.c_size, buffers_host.c_mat); }
-    else if (name == kBufMatAP) { buffers.ap_mat.Write(queue, args.ap_size, buffers_host.ap_mat); }
-    else if (name == kBufScalar) { buffers.scalar.Write(queue, args.scalar_size, buffers_host.scalar); }
-    else { throw std::runtime_error("Invalid buffer name"); }
-  }
-}
-
-// Compiles the above functions
-template void DeviceToHost(const Arguments<half>&, Buffers<half>&, BuffersHost<half>&, Queue&, const std::vector<std::string>&);
-template void DeviceToHost(const Arguments<float>&, Buffers<float>&, BuffersHost<float>&, Queue&, const std::vector<std::string>&);
-template void DeviceToHost(const Arguments<double>&, Buffers<double>&, BuffersHost<double>&, Queue&, const std::vector<std::string>&);
-template void DeviceToHost(const Arguments<float>&, Buffers<float2>&, BuffersHost<float2>&, Queue&, const std::vector<std::string>&);
-template void DeviceToHost(const Arguments<double>&, Buffers<double2>&, BuffersHost<double2>&, Queue&, const std::vector<std::string>&);
-template void DeviceToHost(const Arguments<float2>&, Buffers<float2>&, BuffersHost<float2>&, Queue&, const std::vector<std::string>&);
-template void DeviceToHost(const Arguments<double2>&, Buffers<double2>&, BuffersHost<double2>&, Queue&, const std::vector<std::string>&);
-template void HostToDevice(const Arguments<half>&, Buffers<half>&, BuffersHost<half>&, Queue&, const std::vector<std::string>&);
-template void HostToDevice(const Arguments<float>&, Buffers<float>&, BuffersHost<float>&, Queue&, const std::vector<std::string>&);
-template void HostToDevice(const Arguments<double>&, Buffers<double>&, BuffersHost<double>&, Queue&, const std::vector<std::string>&);
-template void HostToDevice(const Arguments<float>&, Buffers<float2>&, BuffersHost<float2>&, Queue&, const std::vector<std::string>&);
-template void HostToDevice(const Arguments<double>&, Buffers<double2>&, BuffersHost<double2>&, Queue&, const std::vector<std::string>&);
-template void HostToDevice(const Arguments<float2>&, Buffers<float2>&, BuffersHost<float2>&, Queue&, const std::vector<std::string>&);
-template void HostToDevice(const Arguments<double2>&, Buffers<double2>&, BuffersHost<double2>&, Queue&, const std::vector<std::string>&);
-
-// =================================================================================================
-
-// Conversion between half and single-precision
-std::vector<float> HalfToFloatBuffer(const std::vector<half>& source) {
-  auto result = std::vector<float>(source.size());
-  for (auto i = size_t(0); i < source.size(); ++i) { result[i] = HalfToFloat(source[i]); }
-  return result;
-}
-void FloatToHalfBuffer(std::vector<half>& result, const std::vector<float>& source) {
-  for (auto i = size_t(0); i < source.size(); ++i) { result[i] = FloatToHalf(source[i]); }
-}
-
-// As above, but now for OpenCL data-types instead of std::vectors
-Buffer<float> HalfToFloatBuffer(const Buffer<half>& source, cl_command_queue queue_raw) {
-  const auto size = source.GetSize() / sizeof(half);
-  auto queue = Queue(queue_raw);
-  auto context = queue.GetContext();
-  auto source_cpu = std::vector<half>(size);
-  source.Read(queue, size, source_cpu);
-  auto result_cpu = HalfToFloatBuffer(source_cpu);
-  auto result = Buffer<float>(context, size);
-  result.Write(queue, size, result_cpu);
-  return result;
-}
-void FloatToHalfBuffer(Buffer<half>& result, const Buffer<float>& source, cl_command_queue queue_raw) {
-  const auto size = source.GetSize() / sizeof(float);
-  auto queue = Queue(queue_raw);
-  auto context = queue.GetContext();
-  auto source_cpu = std::vector<float>(size);
-  source.Read(queue, size, source_cpu);
-  auto result_cpu = std::vector<half>(size);
-  FloatToHalfBuffer(result_cpu, source_cpu);
-  result.Write(queue, size, result_cpu);
-}
-
 // Converts a 'real' value to a 'real argument' value to be passed to a kernel. Normally there is
 // no conversion, but half-precision is not supported as kernel argument so it is converted to float.
 template <> typename RealArg<half>::Type GetRealArg(const half value) { return HalfToFloat(value); }
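
The surviving GetRealArg specialisation exists because OpenCL does not accept half-precision scalars as kernel arguments, so they are widened to float on the host. A hedged sketch of the intended call pattern (the kernel object and argument index are illustrative, not from this commit):

    // For T = half, GetRealArg returns a float; for all other T it is a no-op.
    auto alpha = FloatToHalf(2.0f);            // host-side scalar in half-precision
    kernel.SetArgument(0, GetRealArg(alpha));  // the kernel receives a plain float
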
diff --git a/src/utilities/utilities.hpp b/src/utilities/utilities.hpp
index 006450c8..a9c492f3 100644
--- a/src/utilities/utilities.hpp
+++ b/src/utilities/utilities.hpp
@@ -7,10 +7,9 @@
 // Author(s):
 //   Cedric Nugteren <www.cedricnugteren.nl>
 //
-// This file provides declarations for the common (test) utility functions such as a command-line
+// This file provides declarations for the common utility functions such as a command-line
 // argument parser. On top of this, it serves as the 'common' header, including the C++ OpenCL
-// wrapper. These utilities are not only used for CLBlast, but also included as part of the tuners,
-// the performance client and the correctness testers.
+// wrapper.
 //
 // =================================================================================================
 
@@ -89,19 +88,6 @@ constexpr auto kArgPsoInfRandom = "pso_inf_random";
 // Annealing tuner-specific arguments in string form
 constexpr auto kArgAnnMaxTemp = "ann_max_temperature";
 
-// The client-specific arguments in string form
-constexpr auto kArgCompareclblas = "clblas";
-constexpr auto kArgComparecblas = "cblas";
-constexpr auto kArgComparecublas = "cublas";
-constexpr auto kArgStepSize = "step";
-constexpr auto kArgNumSteps = "num_steps";
-constexpr auto kArgNumRuns = "runs";
-constexpr auto kArgWarmUp = "warm_up";
-
-// The test-specific arguments in string form
-constexpr auto kArgFullTest = "full_test";
-constexpr auto kArgVerbose = "verbose";
-
 // The common arguments in string form
 constexpr auto kArgPlatform = "platform";
 constexpr auto kArgDevice = "device";
@@ -109,6 +95,7 @@ constexpr auto kArgPrecision = "precision";
 constexpr auto kArgHelp = "h";
 constexpr auto kArgQuiet = "q";
 constexpr auto kArgNoAbbreviations = "no_abbrv";
+constexpr auto kArgNumRuns = "runs";
 
 // The buffer names
 constexpr auto kBufVecX = "X";
@@ -141,9 +128,6 @@ template <typename T> T SmallConstant();
 // Returns the absolute value of a scalar (modulus in case of complex numbers)
 template <typename T> typename BaseType<T>::Type AbsoluteValue(const T value);
 
-// Returns whether a scalar is close to zero
-template <typename T> bool IsCloseToZero(const T value);
-
 // =================================================================================================
 
 // Structure containing all possible arguments for test clients, including their default values
@@ -222,28 +206,6 @@ struct Arguments {
   bool no_abbrv = false;
 };
 
-// Structure containing all possible buffers for test clients
-template <typename T>
-struct Buffers {
-  Buffer<T> x_vec;
-  Buffer<T> y_vec;
-  Buffer<T> a_mat;
-  Buffer<T> b_mat;
-  Buffer<T> c_mat;
-  Buffer<T> ap_mat;
-  Buffer<T> scalar;
-};
-template <typename T>
-struct BuffersHost {
-  std::vector<T> x_vec;
-  std::vector<T> y_vec;
-  std::vector<T> a_mat;
-  std::vector<T> b_mat;
-  std::vector<T> c_mat;
-  std::vector<T> ap_mat;
-  std::vector<T> scalar;
-};
-
 // =================================================================================================
 
 // Converts a value (e.g. an integer) to a string. This also covers special cases for CLBlast
@@ -278,9 +240,6 @@ bool CheckArgument(const std::vector<std::string> &arguments, std::string &help,
 
 // =================================================================================================
 
-// Returns a random number to be used as a seed
-unsigned int GetRandomSeed();
-
 // Test/example data lower and upper limit
 constexpr auto kTestDataLowerLimit = -2.0;
 constexpr auto kTestDataUpperLimit = 2.0;
@@ -291,26 +250,6 @@ void PopulateVector(std::vector<T> &vector, std::mt19937 &mt, std::uniform_real_
 
 // =================================================================================================
 
-// Copies buffers from the OpenCL device to the host
-template <typename T, typename U>
-void DeviceToHost(const Arguments<U> &args, Buffers<T> &buffers, BuffersHost<T> &buffers_host,
-                  Queue &queue, const std::vector<std::string> &names);
-
-// Copies buffers from the host to the OpenCL device
-template <typename T, typename U>
-void HostToDevice(const Arguments<U> &args, Buffers<T> &buffers, BuffersHost<T> &buffers_host,
-                  Queue &queue, const std::vector<std::string> &names);
-
-// =================================================================================================
-
-// Conversion between half and single-precision
-std::vector<float> HalfToFloatBuffer(const std::vector<half>& source);
-void FloatToHalfBuffer(std::vector<half>& result, const std::vector<float>& source);
-
-// As above, but now for OpenCL data-types instead of std::vectors
-Buffer<float> HalfToFloatBuffer(const Buffer<half>& source, cl_command_queue queue_raw);
-void FloatToHalfBuffer(Buffer<half>& result, const Buffer<float>& source, cl_command_queue queue_raw);
-
 // Converts a 'real' value to a 'real argument' value to be passed to a kernel. Normally there is
 // no conversion, but half-precision is not supported as kernel argument so it is converted to float.
 template <typename T> struct RealArg { using Type = T; };