author     Cedric Nugteren <web@cedricnugteren.nl>    2018-01-11 19:42:50 +0100
committer  GitHub <noreply@github.com>                2018-01-11 19:42:50 +0100
commit     9b084d04093fdbfb22ee4790c6b3db5c55cd2719 (patch)
tree       d8f8bc1b3884c0340df9f6d95b4837ed3dff8deb /src
parent     c988c2cdd166ebf6d5b5ec20f445de1a95a65b16 (diff)
parent     99a4df88a6d808ea77c9116ce63621503c00b57a (diff)
Merge pull request #239 from CNugteren/gemm_strided_batched
GemmStridedBatched
Diffstat (limited to 'src')
-rw-r--r--  src/clblast.cpp                                 |  71
-rw-r--r--  src/clblast_c.cpp                               | 127
-rw-r--r--  src/clblast_cuda.cpp                            |  73
-rw-r--r--  src/kernels/level3/copy_pad.opencl              |  39
-rw-r--r--  src/kernels/level3/transpose_pad.opencl         |  41
-rw-r--r--  src/kernels/level3/xgemm_batched.opencl         |  45
-rw-r--r--  src/kernels/level3/xgemm_direct_batched.opencl  | 122
-rw-r--r--  src/routines/common.hpp                         |  66
-rw-r--r--  src/routines/levelx/xgemmbatched.cpp            |  61
-rw-r--r--  src/routines/levelx/xgemmbatched.hpp            |   6
-rw-r--r--  src/routines/levelx/xgemmstridedbatched.cpp     | 278
-rw-r--r--  src/routines/levelx/xgemmstridedbatched.hpp     |  66
-rw-r--r--  src/routines/routines.hpp                       |   1
13 files changed, 928 insertions(+), 68 deletions(-)
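
For reference, a minimal usage sketch of the new OpenCL C++ API introduced by this merge (not part of the diff itself). It assumes an already-created cl_command_queue and three cl_mem buffers holding batch_count column-major matrices packed back-to-back, so the batch strides are simply m*k, k*n and m*n; the helper name RunSgemmStridedBatched is illustrative only.

// Sketch only: assumes clblast.h from this project and pre-allocated, pre-filled device buffers.
#include <clblast.h>

clblast::StatusCode RunSgemmStridedBatched(cl_command_queue queue,
                                           const cl_mem a_mem, const cl_mem b_mem, cl_mem c_mem,
                                           const size_t m, const size_t n, const size_t k,
                                           const size_t batch_count) {
  cl_event event = nullptr;
  const auto status = clblast::GemmStridedBatched<float>(
      clblast::Layout::kColMajor, clblast::Transpose::kNo, clblast::Transpose::kNo,
      m, n, k,
      1.0f,                 // alpha
      a_mem, 0, m, m * k,   // A: offset 0, leading dimension m, stride m*k elements per batch
      b_mem, 0, k, k * n,   // B: offset 0, leading dimension k, stride k*n elements per batch
      0.0f,                 // beta
      c_mem, 0, m, m * n,   // C: offset 0, leading dimension m, stride m*n elements per batch
      batch_count,
      &queue, &event);
  if (status == clblast::StatusCode::kSuccess) {
    clWaitForEvents(1, &event);  // block until the batched GEMM has completed
    clReleaseEvent(event);
  }
  return status;
}
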
diff --git a/src/clblast.cpp b/src/clblast.cpp
index f5e2f1be..c4c51538 100644
--- a/src/clblast.cpp
+++ b/src/clblast.cpp
@@ -2336,6 +2336,77 @@ template StatusCode PUBLIC_API GemmBatched<half>(const Layout, const Transpose,
const size_t,
cl_command_queue*, cl_event*);
+// StridedBatched version of GEMM: SGEMMSTRIDEDBATCHED/DGEMMSTRIDEDBATCHED/CGEMMSTRIDEDBATCHED/ZGEMMSTRIDEDBATCHED/HGEMMSTRIDEDBATCHED
+template <typename T>
+StatusCode GemmStridedBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const T alpha,
+ const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride,
+ const T beta,
+ cl_mem c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ auto queue_cpp = Queue(*queue);
+ auto routine = XgemmStridedBatched<T>(queue_cpp, event);
+ routine.DoGemmStridedBatched(layout, a_transpose, b_transpose,
+ m, n, k,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld, a_stride,
+ Buffer<T>(b_buffer), b_offset, b_ld, b_stride,
+ beta,
+ Buffer<T>(c_buffer), c_offset, c_ld, c_stride,
+ batch_count);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API GemmStridedBatched<float>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const float,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const float,
+ cl_mem, const size_t, const size_t, const size_t,
+ const size_t,
+ cl_command_queue*, cl_event*);
+template StatusCode PUBLIC_API GemmStridedBatched<double>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const double,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const double,
+ cl_mem, const size_t, const size_t, const size_t,
+ const size_t,
+ cl_command_queue*, cl_event*);
+template StatusCode PUBLIC_API GemmStridedBatched<float2>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const float2,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const float2,
+ cl_mem, const size_t, const size_t, const size_t,
+ const size_t,
+ cl_command_queue*, cl_event*);
+template StatusCode PUBLIC_API GemmStridedBatched<double2>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const double2,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const double2,
+ cl_mem, const size_t, const size_t, const size_t,
+ const size_t,
+ cl_command_queue*, cl_event*);
+template StatusCode PUBLIC_API GemmStridedBatched<half>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const half,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const cl_mem, const size_t, const size_t, const size_t,
+ const half,
+ cl_mem, const size_t, const size_t, const size_t,
+ const size_t,
+ cl_command_queue*, cl_event*);
+
// =================================================================================================
// Retrieves the required size of the temporary buffer for the GEMM kernel (optional)
diff --git a/src/clblast_c.cpp b/src/clblast_c.cpp
index 24697779..aa52cbca 100644
--- a/src/clblast_c.cpp
+++ b/src/clblast_c.cpp
@@ -3846,6 +3846,133 @@ CLBlastStatusCode CLBlastHgemmBatched(const CLBlastLayout layout, const CLBlastT
} catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
}
+// Strided-batched version of GEMM: SGEMMSTRIDEDBATCHED/DGEMMSTRIDEDBATCHED/CGEMMSTRIDEDBATCHED/ZGEMMSTRIDEDBATCHED/HGEMMSTRIDEDBATCHED
+CLBlastStatusCode CLBlastSgemmStridedBatched(const CLBlastLayout layout, const CLBlastTranspose a_transpose, const CLBlastTranspose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const float alpha,
+ const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride,
+ const float beta,
+ cl_mem c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ return static_cast<CLBlastStatusCode>(
+ clblast::GemmStridedBatched(static_cast<clblast::Layout>(layout),
+ static_cast<clblast::Transpose>(a_transpose),
+ static_cast<clblast::Transpose>(b_transpose),
+ m, n, k,
+ alpha,
+ a_buffer, a_offset, a_ld, a_stride,
+ b_buffer, b_offset, b_ld, b_stride,
+ beta,
+ c_buffer, c_offset, c_ld, c_stride,
+ batch_count,
+ queue, event)
+ );
+ } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
+}
+CLBlastStatusCode CLBlastDgemmStridedBatched(const CLBlastLayout layout, const CLBlastTranspose a_transpose, const CLBlastTranspose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const double alpha,
+ const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride,
+ const double beta,
+ cl_mem c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ return static_cast<CLBlastStatusCode>(
+ clblast::GemmStridedBatched(static_cast<clblast::Layout>(layout),
+ static_cast<clblast::Transpose>(a_transpose),
+ static_cast<clblast::Transpose>(b_transpose),
+ m, n, k,
+ alpha,
+ a_buffer, a_offset, a_ld, a_stride,
+ b_buffer, b_offset, b_ld, b_stride,
+ beta,
+ c_buffer, c_offset, c_ld, c_stride,
+ batch_count,
+ queue, event)
+ );
+ } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
+}
+CLBlastStatusCode CLBlastCgemmStridedBatched(const CLBlastLayout layout, const CLBlastTranspose a_transpose, const CLBlastTranspose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const cl_float2 alpha,
+ const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride,
+ const cl_float2 beta,
+ cl_mem c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ return static_cast<CLBlastStatusCode>(
+ clblast::GemmStridedBatched(static_cast<clblast::Layout>(layout),
+ static_cast<clblast::Transpose>(a_transpose),
+ static_cast<clblast::Transpose>(b_transpose),
+ m, n, k,
+ float2{alpha.s[0], alpha.s[1]},
+ a_buffer, a_offset, a_ld, a_stride,
+ b_buffer, b_offset, b_ld, b_stride,
+ float2{beta.s[0], beta.s[1]},
+ c_buffer, c_offset, c_ld, c_stride,
+ batch_count,
+ queue, event)
+ );
+ } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
+}
+CLBlastStatusCode CLBlastZgemmStridedBatched(const CLBlastLayout layout, const CLBlastTranspose a_transpose, const CLBlastTranspose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const cl_double2 alpha,
+ const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride,
+ const cl_double2 beta,
+ cl_mem c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ return static_cast<CLBlastStatusCode>(
+ clblast::GemmStridedBatched(static_cast<clblast::Layout>(layout),
+ static_cast<clblast::Transpose>(a_transpose),
+ static_cast<clblast::Transpose>(b_transpose),
+ m, n, k,
+ double2{alpha.s[0], alpha.s[1]},
+ a_buffer, a_offset, a_ld, a_stride,
+ b_buffer, b_offset, b_ld, b_stride,
+ double2{beta.s[0], beta.s[1]},
+ c_buffer, c_offset, c_ld, c_stride,
+ batch_count,
+ queue, event)
+ );
+ } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
+}
+CLBlastStatusCode CLBlastHgemmStridedBatched(const CLBlastLayout layout, const CLBlastTranspose a_transpose, const CLBlastTranspose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const cl_half alpha,
+ const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride,
+ const cl_half beta,
+ cl_mem c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ return static_cast<CLBlastStatusCode>(
+ clblast::GemmStridedBatched(static_cast<clblast::Layout>(layout),
+ static_cast<clblast::Transpose>(a_transpose),
+ static_cast<clblast::Transpose>(b_transpose),
+ m, n, k,
+ alpha,
+ a_buffer, a_offset, a_ld, a_stride,
+ b_buffer, b_offset, b_ld, b_stride,
+ beta,
+ c_buffer, c_offset, c_ld, c_stride,
+ batch_count,
+ queue, event)
+ );
+ } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
+}
+
// =================================================================================================
// Clears the cache of stored binaries
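
A note on the complex bindings added above: the C API receives alpha and beta as cl_float2/cl_double2 and repacks them into the internal float2/double2 types via their .s[0]/.s[1] components. A hedged sketch of a caller of the single-precision complex routine (the helper name and buffer variables are illustrative; buffers hold column-major complex matrices packed back-to-back):

// Sketch only: assumes clblast_c.h and pre-allocated complex device buffers.
#include <clblast_c.h>

CLBlastStatusCode RunCgemmStridedBatched(cl_command_queue queue,
                                         const cl_mem a_mem, const cl_mem b_mem, cl_mem c_mem,
                                         const size_t m, const size_t n, const size_t k,
                                         const size_t batch_count) {
  cl_float2 alpha; alpha.s[0] = 1.0f; alpha.s[1] = 0.0f;  // 1 + 0i, repacked internally via .s[0]/.s[1]
  cl_float2 beta;  beta.s[0]  = 0.0f; beta.s[1]  = 0.0f;  // 0 + 0i
  cl_event event = NULL;
  const CLBlastStatusCode status = CLBlastCgemmStridedBatched(
      CLBlastLayoutColMajor, CLBlastTransposeNo, CLBlastTransposeNo,
      m, n, k, alpha,
      a_mem, 0, m, m * k,   // offsets, leading dimensions and strides count complex elements, not bytes
      b_mem, 0, k, k * n, beta,
      c_mem, 0, m, m * n,
      batch_count, &queue, &event);
  if (status == CLBlastSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
  return status;
}
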
diff --git a/src/clblast_cuda.cpp b/src/clblast_cuda.cpp
index 348ff3f5..0aa57087 100644
--- a/src/clblast_cuda.cpp
+++ b/src/clblast_cuda.cpp
@@ -2436,6 +2436,79 @@ template StatusCode PUBLIC_API GemmBatched<half>(const Layout, const Transpose,
const size_t,
const CUcontext, const CUdevice);
+// StridedBatched version of GEMM: SGEMMSTRIDEDBATCHED/DGEMMSTRIDEDBATCHED/CGEMMSTRIDEDBATCHED/ZGEMMSTRIDEDBATCHED/HGEMMSTRIDEDBATCHED
+template <typename T>
+StatusCode GemmStridedBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const T alpha,
+ const CUdeviceptr a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const CUdeviceptr b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride,
+ const T beta,
+ CUdeviceptr c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = XgemmStridedBatched<T>(queue_cpp, nullptr);
+ routine.DoGemmStridedBatched(layout, a_transpose, b_transpose,
+ m, n, k,
+ alpha,
+ Buffer<T>(a_buffer), a_offset, a_ld, a_stride,
+ Buffer<T>(b_buffer), b_offset, b_ld, b_stride,
+ beta,
+ Buffer<T>(c_buffer), c_offset, c_ld, c_stride,
+ batch_count);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API GemmStridedBatched<float>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const float,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const float,
+ CUdeviceptr, const size_t, const size_t, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API GemmStridedBatched<double>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const double,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const double,
+ CUdeviceptr, const size_t, const size_t, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API GemmStridedBatched<float2>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const float2,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const float2,
+ CUdeviceptr, const size_t, const size_t, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API GemmStridedBatched<double2>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const double2,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const double2,
+ CUdeviceptr, const size_t, const size_t, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API GemmStridedBatched<half>(const Layout, const Transpose, const Transpose,
+ const size_t, const size_t, const size_t,
+ const half,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t, const size_t, const size_t,
+ const half,
+ CUdeviceptr, const size_t, const size_t, const size_t,
+ const size_t,
+ const CUcontext, const CUdevice);
+
// =================================================================================================
// Retrieves the required size of the temporary buffer for the GEMM kernel (optional)
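
The CUDA-API variant added above has the same shape but takes raw CUdeviceptr buffers plus a CUcontext and CUdevice instead of an OpenCL queue and event; as the diff shows, the routine creates its own queue internally. A hedged sketch, assuming CLBlast was built with the CUDA backend and that the context, device and device pointers already exist (the helper name is illustrative):

// Sketch only: CUDA driver API types come from <cuda.h>; clblast_cuda.h declares the CUDA overloads.
#include <cuda.h>
#include <clblast_cuda.h>

clblast::StatusCode RunSgemmStridedBatchedCuda(const CUcontext context, const CUdevice device,
                                               const CUdeviceptr a_dev, const CUdeviceptr b_dev,
                                               CUdeviceptr c_dev,
                                               const size_t m, const size_t n, const size_t k,
                                               const size_t batch_count) {
  return clblast::GemmStridedBatched<float>(
      clblast::Layout::kColMajor, clblast::Transpose::kNo, clblast::Transpose::kNo,
      m, n, k,
      1.0f,
      a_dev, 0, m, m * k,   // per-batch strides in elements, as in the OpenCL API
      b_dev, 0, k, k * n,
      0.0f,
      c_dev, 0, m, m * n,
      batch_count,
      context, device);
}
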
diff --git a/src/kernels/level3/copy_pad.opencl b/src/kernels/level3/copy_pad.opencl
index 2e191514..3d389b74 100644
--- a/src/kernels/level3/copy_pad.opencl
+++ b/src/kernels/level3/copy_pad.opencl
@@ -174,6 +174,45 @@ void CopyMatrixBatched(const int src_one, const int src_two,
#endif
// =================================================================================================
+#if defined(ROUTINE_GEMMSTRIDEDBATCHED)
+
+// Strided-batched version of the above
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void CopyPadMatrixStridedBatched(const int src_one, const int src_two,
+ const int src_ld, const int src_offset,
+ const int src_stride, __global const real* restrict src,
+ const int dest_one, const int dest_two,
+ const int dest_ld, const int dest_offset,
+ const int dest_stride, __global real* dest,
+ const int do_conjugate) {
+ const int batch = get_group_id(2);
+ const int src_offset_batch = src_offset + src_stride * batch;
+ const int dest_offset_batch = dest_offset + dest_stride * batch;
+ real alpha; SetToOne(alpha);
+ _CopyPadMatrix(src_one, src_two, src_ld, src_offset_batch, src,
+ dest_one, dest_two, dest_ld, dest_offset_batch, dest,
+ alpha, do_conjugate);
+}
+
+// Strided-batched version of the above
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void CopyMatrixStridedBatched(const int src_one, const int src_two,
+ const int src_ld, const int src_offset,
+ const int src_stride, __global const real* restrict src,
+ const int dest_one, const int dest_two,
+ const int dest_ld, const int dest_offset,
+ const int dest_stride, __global real* dest) {
+ const int batch = get_group_id(2);
+ const int src_offset_batch = src_offset + src_stride * batch;
+ const int dest_offset_batch = dest_offset + dest_stride * batch;
+ real alpha; SetToOne(alpha);
+ _CopyMatrix(src_one, src_two, src_ld, src_offset_batch, src,
+ dest_one, dest_two, dest_ld, dest_offset_batch, dest,
+ alpha, 0, 0, 0);
+}
+
+#endif
+// =================================================================================================
// End of the C++11 raw string literal
)"
diff --git a/src/kernels/level3/transpose_pad.opencl b/src/kernels/level3/transpose_pad.opencl
index 67c2bf72..e55a8b7c 100644
--- a/src/kernels/level3/transpose_pad.opencl
+++ b/src/kernels/level3/transpose_pad.opencl
@@ -231,6 +231,47 @@ void TransposeMatrixBatched(const int src_one, const int src_two,
#endif
// =================================================================================================
+#if defined(ROUTINE_GEMMSTRIDEDBATCHED)
+
+// Strided-batched version of the above
+__kernel __attribute__((reqd_work_group_size(PADTRA_TILE, PADTRA_TILE, 1)))
+void TransposePadMatrixStridedBatched(const int src_one, const int src_two,
+ const int src_ld, const int src_offset,
+ const int src_stride, __global const real* restrict src,
+ const int dest_one, const int dest_two,
+ const int dest_ld, const int dest_offset,
+ const int dest_stride, __global real* dest,
+ const int do_conjugate) {
+ const int batch = get_group_id(2);
+ const int src_offset_batch = src_offset + src_stride * batch;
+ const int dest_offset_batch = dest_offset + dest_stride * batch;
+ real alpha; SetToOne(alpha);
+ __local real tile[(PADTRA_WPT*PADTRA_TILE) * (PADTRA_WPT*PADTRA_TILE + PADTRA_PAD)];
+ _TransposePadMatrix(tile, src_one, src_two, src_ld, src_offset_batch, src,
+ dest_one, dest_two, dest_ld, dest_offset_batch, dest,
+ alpha, do_conjugate);
+}
+
+// Strided-batched version of the above
+__kernel __attribute__((reqd_work_group_size(PADTRA_TILE, PADTRA_TILE, 1)))
+void TransposeMatrixStridedBatched(const int src_one, const int src_two,
+ const int src_ld, const int src_offset,
+ const int src_stride, __global const real* restrict src,
+ const int dest_one, const int dest_two,
+ const int dest_ld, const int dest_offset,
+ const int dest_stride, __global real* dest) {
+ const int batch = get_group_id(2);
+ const int src_offset_batch = src_offset + src_stride * batch;
+ const int dest_offset_batch = dest_offset + dest_stride * batch;
+ real alpha; SetToOne(alpha);
+ __local real tile[(PADTRA_WPT*PADTRA_TILE) * (PADTRA_WPT*PADTRA_TILE + PADTRA_PAD)];
+ _TransposeMatrix(tile, src_one, src_two, src_ld, src_offset_batch, src,
+ dest_one, dest_two, dest_ld, dest_offset_batch, dest,
+ alpha, 0, 0, 0);
+}
+
+#endif
+// =================================================================================================
// End of the C++11 raw string literal
)"
diff --git a/src/kernels/level3/xgemm_batched.opencl b/src/kernels/level3/xgemm_batched.opencl
index 372f910b..b51e6298 100644
--- a/src/kernels/level3/xgemm_batched.opencl
+++ b/src/kernels/level3/xgemm_batched.opencl
@@ -17,8 +17,8 @@
R"(
// =================================================================================================
+#if defined(ROUTINE_GEMMBATCHED)
-// Main entry point of the kernel. This is the regular full version.
__kernel __attribute__((reqd_work_group_size(MDIMC, NDIMC, 1)))
void XgemmBatched(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas,
@@ -58,6 +58,49 @@ void XgemmBatched(const int kSizeM, const int kSizeN, const int kSizeK,
#endif
}
+#endif
+// =================================================================================================
+#if defined(ROUTINE_GEMMSTRIDEDBATCHED)
+
+__kernel __attribute__((reqd_work_group_size(MDIMC, NDIMC, 1)))
+void XgemmStridedBatched(const int kSizeM, const int kSizeN, const int kSizeK,
+ const real_arg arg_alpha, const real_arg arg_beta,
+ const __global realM* restrict agm, const int a_one, const int a_two,
+ const __global realN* restrict bgm, const int b_one, const int b_two,
+ __global realM* cgm, const int c_one, const int c_two) {
+ const int batch = get_group_id(2);
+ const real alpha = GetRealArg(arg_alpha);
+ const real beta = GetRealArg(arg_beta);
+
+ // Sets the offsets
+ const int a_offset = batch * a_one * a_two;
+ const int b_offset = batch * b_one * b_two;
+ const int c_offset = batch * c_one * c_two;
+ const __global realM* restrict agm_ = &agm[a_offset / VWM];
+ const __global realN* restrict bgm_ = &bgm[b_offset / VWN];
+ __global realM* restrict cgm_ = &cgm[c_offset / VWM];
+
+ // Allocates workgroup-private memory (local memory)
+ #if SA == 1
+ __local realM alm[KWG * MWG/VWM];
+ #endif
+ #if SB == 1
+ __local realN blm[KWG * NWG/VWN];
+ #endif
+
+ // Computes the matrix-multiplication and stores the result in global memory
+ #if SA == 1 && SB == 1
+ XgemmBody(kSizeM, kSizeN, kSizeK, agm_, bgm_, cgm_, alpha, beta, alm, blm);
+ #elif SA == 1
+ XgemmBody(kSizeM, kSizeN, kSizeK, agm_, bgm_, cgm_, alpha, beta, alm);
+ #elif SB == 1
+ XgemmBody(kSizeM, kSizeN, kSizeK, agm_, bgm_, cgm_, alpha, beta, blm);
+ #else
+ XgemmBody(kSizeM, kSizeN, kSizeK, agm_, bgm_, cgm_, alpha, beta);
+ #endif
+}
+
+#endif
// =================================================================================================
// End of the C++11 raw string literal
diff --git a/src/kernels/level3/xgemm_direct_batched.opencl b/src/kernels/level3/xgemm_direct_batched.opencl
index d946a056..d15ed31e 100644
--- a/src/kernels/level3/xgemm_direct_batched.opencl
+++ b/src/kernels/level3/xgemm_direct_batched.opencl
@@ -17,15 +17,16 @@
R"(
// =================================================================================================
+#if defined(ROUTINE_GEMMBATCHED)
// Direct version of the batched GEMM kernel with [A, B] = [non-transposed, non-transposed]
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK,
- const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
- const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
- const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
- __global real* cgm, const __constant int* c_offsets, const int c_ld,
- const int c_transpose, const int a_conjugate, const int b_conjugate) {
+ const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
+ const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
+ const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
+ __global real* cgm, const __constant int* c_offsets, const int c_ld,
+ const int c_transpose, const int a_conjugate, const int b_conjugate) {
const int batch = get_group_id(2);
const real_arg arg_alpha = arg_alphas[batch];
const real_arg arg_beta = arg_betas[batch];
@@ -42,11 +43,11 @@ void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK,
// Direct version of the batched GEMM kernel with [A, B] = [non-transposed, transposed]
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK,
- const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
- const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
- const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
- __global real* cgm, const __constant int* c_offsets, const int c_ld,
- const int c_transpose, const int a_conjugate, const int b_conjugate) {
+ const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
+ const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
+ const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
+ __global real* cgm, const __constant int* c_offsets, const int c_ld,
+ const int c_transpose, const int a_conjugate, const int b_conjugate) {
const int batch = get_group_id(2);
const real_arg arg_alpha = arg_alphas[batch];
const real_arg arg_beta = arg_betas[batch];
@@ -63,11 +64,11 @@ void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK,
// Direct version of the batched GEMM kernel with [A, B] = [transposed, non-transposed]
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK,
- const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
- const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
- const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
- __global real* cgm, const __constant int* c_offsets, const int c_ld,
- const int c_transpose, const int a_conjugate, const int b_conjugate) {
+ const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
+ const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
+ const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
+ __global real* cgm, const __constant int* c_offsets, const int c_ld,
+ const int c_transpose, const int a_conjugate, const int b_conjugate) {
const int batch = get_group_id(2);
const real_arg arg_alpha = arg_alphas[batch];
const real_arg arg_beta = arg_betas[batch];
@@ -84,11 +85,11 @@ void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK,
// Direct version of the batched GEMM kernel with [A, B] = [transposed, transposed]
__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK,
- const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
- const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
- const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
- __global real* cgm, const __constant int* c_offsets, const int c_ld,
- const int c_transpose, const int a_conjugate, const int b_conjugate) {
+ const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
+ const __global realMD* restrict agm, const __constant int* a_offsets, const int a_ld,
+ const __global realND* restrict bgm, const __constant int* b_offsets, const int b_ld,
+ __global real* cgm, const __constant int* c_offsets, const int c_ld,
+ const int c_transpose, const int a_conjugate, const int b_conjugate) {
const int batch = get_group_id(2);
const real_arg arg_alpha = arg_alphas[batch];
const real_arg arg_beta = arg_betas[batch];
@@ -102,6 +103,87 @@ void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK,
alm, blm, 1, 1, c_transpose, a_conjugate, b_conjugate);
}
+#endif
+// =================================================================================================
+#if defined(ROUTINE_GEMMSTRIDEDBATCHED)
+
+// Direct version of the strided-batched GEMM kernel with [A, B] = [non-transposed, non-transposed]
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectStridedBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK,
+ const real_arg arg_alpha, const real_arg arg_beta,
+ const __global realMD* restrict agm, const int a_offset, const int a_ld, const int a_stride,
+ const __global realND* restrict bgm, const int b_offset, const int b_ld, const int b_stride,
+ __global real* cgm, const int c_offset, const int c_ld, const int c_stride,
+ const int c_transpose, const int a_conjugate, const int b_conjugate) {
+ const int batch = get_group_id(2);
+ const int a_offset_batch = a_offset + a_stride * batch;
+ const int b_offset_batch = b_offset + b_stride * batch;
+ const int c_offset_batch = c_offset + c_stride * batch;
+ __local real alm[WGD * (WGD + PADA)];
+ __local real blm[WGD * (WGD + PADB)];
+ XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
+ agm, a_offset_batch, a_ld, bgm, b_offset_batch, b_ld, cgm, c_offset_batch, c_ld,
+ alm, blm, 0, 0, c_transpose, a_conjugate, b_conjugate);
+}
+
+// Direct version of the strided-batched GEMM kernel with [A, B] = [non-transposed, transposed]
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectStridedBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK,
+ const real_arg arg_alpha, const real_arg arg_beta,
+ const __global realMD* restrict agm, const int a_offset, const int a_ld, const int a_stride,
+ const __global realND* restrict bgm, const int b_offset, const int b_ld, const int b_stride,
+ __global real* cgm, const int c_offset, const int c_ld, const int c_stride,
+ const int c_transpose, const int a_conjugate, const int b_conjugate) {
+ const int batch = get_group_id(2);
+ const int a_offset_batch = a_offset + a_stride * batch;
+ const int b_offset_batch = b_offset + b_stride * batch;
+ const int c_offset_batch = c_offset + c_stride * batch;
+ __local real alm[WGD * (WGD + PADA)];
+ __local real blm[WGD * (WGD + PADB)];
+ XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
+ agm, a_offset_batch, a_ld, bgm, b_offset_batch, b_ld, cgm, c_offset_batch, c_ld,
+ alm, blm, 0, 1, c_transpose, a_conjugate, b_conjugate);
+}
+
+// Direct version of the strided-batched GEMM kernel with [A, B] = [transposed, non-transposed]
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectStridedBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK,
+ const real_arg arg_alpha, const real_arg arg_beta,
+ const __global realMD* restrict agm, const int a_offset, const int a_ld, const int a_stride,
+ const __global realND* restrict bgm, const int b_offset, const int b_ld, const int b_stride,
+ __global real* cgm, const int c_offset, const int c_ld, const int c_stride,
+ const int c_transpose, const int a_conjugate, const int b_conjugate) {
+ const int batch = get_group_id(2);
+ const int a_offset_batch = a_offset + a_stride * batch;
+ const int b_offset_batch = b_offset + b_stride * batch;
+ const int c_offset_batch = c_offset + c_stride * batch;
+ __local real alm[WGD * (WGD + PADA)];
+ __local real blm[WGD * (WGD + PADB)];
+ XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
+ agm, a_offset_batch, a_ld, bgm, b_offset_batch, b_ld, cgm, c_offset_batch, c_ld,
+ alm, blm, 1, 0, c_transpose, a_conjugate, b_conjugate);
+}
+
+// Direct version of the strided-batched GEMM kernel with [A, B] = [transposed, transposed]
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XgemmDirectStridedBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK,
+ const real_arg arg_alpha, const real_arg arg_beta,
+ const __global realMD* restrict agm, const int a_offset, const int a_ld, const int a_stride,
+ const __global realND* restrict bgm, const int b_offset, const int b_ld, const int b_stride,
+ __global real* cgm, const int c_offset, const int c_ld, const int c_stride,
+ const int c_transpose, const int a_conjugate, const int b_conjugate) {
+ const int batch = get_group_id(2);
+ const int a_offset_batch = a_offset + a_stride * batch;
+ const int b_offset_batch = b_offset + b_stride * batch;
+ const int c_offset_batch = c_offset + c_stride * batch;
+ __local real alm[WGD * (WGD + PADA)];
+ __local real blm[WGD * (WGD + PADB)];
+ XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
+ agm, a_offset_batch, a_ld, bgm, b_offset_batch, b_ld, cgm, c_offset_batch, c_ld,
+ alm, blm, 1, 1, c_transpose, a_conjugate, b_conjugate);
+}
+
+#endif
// =================================================================================================
// End of the C++11 raw string literal
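
All of the strided-batched kernels above derive a per-batch offset as offset + stride * batch. As a plain statement of those semantics, here is a naive host-side reference (a sketch: column-major, no transposes, real-valued data, offsets and strides in elements; the function name is illustrative and not part of CLBlast):

// Naive reference: for each batch b, C_b = alpha * A_b * B_b + beta * C_b,
// where matrix X_b starts at x_offset + b * x_stride within its buffer.
#include <cstddef>
#include <vector>

void ReferenceGemmStridedBatched(const size_t m, const size_t n, const size_t k, const float alpha,
                                 const std::vector<float> &a, const size_t a_offset, const size_t a_ld, const size_t a_stride,
                                 const std::vector<float> &b, const size_t b_offset, const size_t b_ld, const size_t b_stride,
                                 const float beta,
                                 std::vector<float> &c, const size_t c_offset, const size_t c_ld, const size_t c_stride,
                                 const size_t batch_count) {
  for (size_t batch = 0; batch < batch_count; ++batch) {
    const size_t a_batch = a_offset + a_stride * batch;  // same arithmetic as the kernels above
    const size_t b_batch = b_offset + b_stride * batch;
    const size_t c_batch = c_offset + c_stride * batch;
    for (size_t j = 0; j < n; ++j) {
      for (size_t i = 0; i < m; ++i) {
        float acc = 0.0f;
        for (size_t l = 0; l < k; ++l) {  // column-major: A(i,l) at i + l*a_ld, B(l,j) at l + j*b_ld
          acc += a[a_batch + i + l * a_ld] * b[b_batch + l + j * b_ld];
        }
        c[c_batch + i + j * c_ld] = alpha * acc + beta * c[c_batch + i + j * c_ld];
      }
    }
  }
}
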
diff --git a/src/routines/common.hpp b/src/routines/common.hpp
index 06d001d9..6cbe1e1b 100644
--- a/src/routines/common.hpp
+++ b/src/routines/common.hpp
@@ -239,6 +239,72 @@ void PadCopyTransposeMatrixBatched(Queue &queue, const Device &device,
}
}
+// Strided-batched version of the above
+template <typename T>
+void PadCopyTransposeMatrixStridedBatched(Queue &queue, const Device &device,
+ const Databases &db,
+ EventPointer event, const std::vector<Event> &waitForEvents,
+ const size_t src_one, const size_t src_two,
+ const size_t src_ld, const size_t src_offset,
+ const size_t src_stride, const Buffer<T> &src,
+ const size_t dest_one, const size_t dest_two,
+ const size_t dest_ld, const size_t dest_offset,
+ const size_t dest_stride, const Buffer<T> &dest,
+ const Program &program, const bool do_pad,
+ const bool do_transpose, const bool do_conjugate,
+ const size_t batch_count) {
+
+ // Determines the right kernel
+ auto kernel_name = std::string{};
+ if (do_transpose) {
+ kernel_name = (do_pad) ? "TransposePadMatrixStridedBatched" : "TransposeMatrixStridedBatched";
+ }
+ else {
+ kernel_name = (do_pad) ? "CopyPadMatrixStridedBatched" : "CopyMatrixStridedBatched";
+ }
+
+ // Retrieves the kernel from the compiled binary
+ auto kernel = Kernel(program, kernel_name);
+
+ // Sets the kernel arguments
+ kernel.SetArgument(0, static_cast<int>(src_one));
+ kernel.SetArgument(1, static_cast<int>(src_two));
+ kernel.SetArgument(2, static_cast<int>(src_ld));
+ kernel.SetArgument(3, static_cast<int>(src_offset));
+ kernel.SetArgument(4, static_cast<int>(src_stride));
+ kernel.SetArgument(5, src());
+ kernel.SetArgument(6, static_cast<int>(dest_one));
+ kernel.SetArgument(7, static_cast<int>(dest_two));
+ kernel.SetArgument(8, static_cast<int>(dest_ld));
+ kernel.SetArgument(9, static_cast<int>(dest_offset));
+ kernel.SetArgument(10, static_cast<int>(dest_stride));
+ kernel.SetArgument(11, dest());
+ if (do_pad) {
+ kernel.SetArgument(12, static_cast<int>(do_conjugate));
+ }
+
+ // Launches the kernel and returns the error code. Uses global and local thread sizes based on
+ // parameters in the database.
+ if (do_transpose) {
+ const auto global = std::vector<size_t>{
+ Ceil(CeilDiv(dest_one, db["PADTRA_WPT"]), db["PADTRA_TILE"]),
+ Ceil(CeilDiv(dest_two, db["PADTRA_WPT"]), db["PADTRA_TILE"]),
+ batch_count
+ };
+ const auto local = std::vector<size_t>{db["PADTRA_TILE"], db["PADTRA_TILE"], 1};
+ RunKernel(kernel, queue, device, global, local, event, waitForEvents);
+ }
+ else {
+ const auto global = std::vector<size_t>{
+ Ceil(CeilDiv(dest_one, db["PAD_WPTX"]), db["PAD_DIMX"]),
+ Ceil(CeilDiv(dest_two, db["PAD_WPTY"]), db["PAD_DIMY"]),
+ batch_count
+ };
+ const auto local = std::vector<size_t>{db["PAD_DIMX"], db["PAD_DIMY"], 1};
+ RunKernel(kernel, queue, device, global, local, event, waitForEvents);
+ }
+}
+
// =================================================================================================
} // namespace clblast
diff --git a/src/routines/levelx/xgemmbatched.cpp b/src/routines/levelx/xgemmbatched.cpp
index 8ce2dedc..1c0953e8 100644
--- a/src/routines/levelx/xgemmbatched.cpp
+++ b/src/routines/levelx/xgemmbatched.cpp
@@ -12,6 +12,7 @@
// =================================================================================================
#include "routines/levelx/xgemmbatched.hpp"
+#include "routines/level3/xgemm.hpp"
#include <string>
#include <vector>
@@ -64,34 +65,12 @@ void XgemmBatched<T>::DoGemmBatched(const Layout layout, const Transpose a_trans
throw BLASError(StatusCode::kInvalidBatchCount);
}
- // Makes sure all dimensions are larger than zero
- if ((m == 0) || (n == 0) || (k == 0)) { throw BLASError(StatusCode::kInvalidDimension); }
-
- // Computes whether or not the matrices are transposed in memory. See GEMM routine for details.
- const auto a_rotated = (layout == Layout::kColMajor && a_transpose != Transpose::kNo) ||
- (layout == Layout::kRowMajor && a_transpose == Transpose::kNo);
- const auto b_rotated = (layout == Layout::kColMajor && b_transpose != Transpose::kNo) ||
- (layout == Layout::kRowMajor && b_transpose == Transpose::kNo);
- const auto c_rotated = (layout == Layout::kRowMajor);
- static const auto a_want_rotated = false;
- static const auto b_want_rotated = true;
- static const auto c_want_rotated = false;
- const auto a_do_transpose = a_rotated != a_want_rotated;
- const auto b_do_transpose = b_rotated != b_want_rotated;
- const auto c_do_transpose = c_rotated != c_want_rotated;
-
- // In case of complex data-types, the transpose can also become a conjugate transpose
- const auto a_conjugate = (a_transpose == Transpose::kConjugate);
- const auto b_conjugate = (b_transpose == Transpose::kConjugate);
-
- // Computes the first and second dimensions of the 3 matrices taking into account whether the
- // matrices are rotated or not
- const auto a_one = (a_rotated) ? k : m;
- const auto a_two = (a_rotated) ? m : k;
- const auto b_one = (b_rotated) ? n : k;
- const auto b_two = (b_rotated) ? k : n;
- const auto c_one = (c_rotated) ? n : m;
- const auto c_two = (c_rotated) ? m : n;
+ // Computes the transpose/conjugate options and sets the a/b/c sizes based on that
+ bool a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate;
+ size_t a_one, a_two, b_one, b_two, c_one, c_two;
+ Xgemm<T>::ProcessArguments(layout, a_transpose, b_transpose, m, n, k,
+ a_one, a_two, b_one, b_two, c_one, c_two,
+ a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate);
// Tests the matrices for validity
for (auto batch = size_t{0}; batch < batch_count; ++batch) {
@@ -130,10 +109,7 @@ void XgemmBatched<T>::DoGemmBatched(const Layout layout, const Transpose a_trans
a_buffer, a_offsets_int, a_ld, b_buffer, b_offsets_int, b_ld,
betas_device, c_buffer, c_offsets_int, c_ld,
a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate,
- a_one, a_two, a_want_rotated,
- b_one, b_two, b_want_rotated,
- c_one, c_two, c_want_rotated,
- batch_count);
+ a_one, a_two, b_one, b_two, c_one, c_two, batch_count);
}
}
@@ -152,9 +128,9 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
const Buffer<T> &c_buffer, const std::vector<int> &c_offsets, const size_t c_ld,
const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
const bool a_conjugate, const bool b_conjugate,
- const size_t a_one, const size_t a_two, const bool a_want_rotated,
- const size_t b_one, const size_t b_two, const bool b_want_rotated,
- const size_t c_one, const size_t c_two, const bool c_want_rotated,
+ const size_t a_one, const size_t a_two,
+ const size_t b_one, const size_t b_two,
+ const size_t c_one, const size_t c_two,
const size_t batch_count) {
// Calculates the ceiled versions of m, n, and k
const auto m_ceiled = Ceil(Ceil(m, db_["MWG"]), db_["VWM"]);
@@ -163,12 +139,9 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
// Computes the first and second "internal" (ceiled) dimensions of the 3 matrices taking into account
// whether the matrices need to be rotated or not for the kernel.
- const auto a_one_i = (a_want_rotated) ? k_ceiled : m_ceiled;
- const auto a_two_i = (a_want_rotated) ? m_ceiled : k_ceiled;
- const auto b_one_i = (b_want_rotated) ? n_ceiled : k_ceiled;
- const auto b_two_i = (b_want_rotated) ? k_ceiled : n_ceiled;
- const auto c_one_i = (c_want_rotated) ? n_ceiled : m_ceiled;
- const auto c_two_i = (c_want_rotated) ? m_ceiled : n_ceiled;
+ size_t a_one_i, a_two_i, b_one_i, b_two_i, c_one_i, c_two_i;
+ Xgemm<T>::CalculateInternalDimensions(m, n, k, db_["MWG"], db_["NWG"], db_["KWG"],
+ a_one_i, a_two_i, b_one_i, b_two_i, c_one_i, c_two_i);
// Sets the "internal" offsets, i.e. the perfect offsets
auto a_offsets_i = std::vector<int>(batch_count);
@@ -182,11 +155,11 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
// Determines whether or not temporary matrices are needed
auto a_no_temp = a_one == a_one_i && a_two == a_two_i && a_ld == a_one && a_offsets == a_offsets_i &&
- a_do_transpose == false && a_conjugate == false;
+ !a_do_transpose && !a_conjugate;
auto b_no_temp = b_one == b_one_i && b_two == b_two_i && b_ld == b_one && b_offsets == b_offsets_i &&
- b_do_transpose == false && b_conjugate == false;
+ !b_do_transpose && !b_conjugate;
auto c_no_temp = c_one == c_one_i && c_two == c_two_i && c_ld == c_one && c_offsets == c_offsets_i &&
- c_do_transpose == false;
+ !c_do_transpose;
// Creates the temporary matrices
const auto a_temp = (a_no_temp) ? a_buffer : Buffer<T>(context_, batch_count * a_one_i * a_two_i);
diff --git a/src/routines/levelx/xgemmbatched.hpp b/src/routines/levelx/xgemmbatched.hpp
index 6136dd5f..989f3815 100644
--- a/src/routines/levelx/xgemmbatched.hpp
+++ b/src/routines/levelx/xgemmbatched.hpp
@@ -48,9 +48,9 @@ class XgemmBatched: public Routine {
const Buffer<T> &c_buffer, const std::vector<int> &c_offsets, const size_t c_ld,
const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
const bool a_conjugate, const bool b_conjugate,
- const size_t a_one, const size_t a_two, const bool a_want_rotated,
- const size_t b_one, const size_t b_two, const bool b_want_rotated,
- const size_t c_one, const size_t c_two, const bool c_want_rotated,
+ const size_t a_one, const size_t a_two,
+ const size_t b_one, const size_t b_two,
+ const size_t c_one, const size_t c_two,
const size_t batch_count);
// Direct version of batched GEMM (no pre and post-processing kernels)
diff --git a/src/routines/levelx/xgemmstridedbatched.cpp b/src/routines/levelx/xgemmstridedbatched.cpp
new file mode 100644
index 00000000..affbceee
--- /dev/null
+++ b/src/routines/levelx/xgemmstridedbatched.cpp
@@ -0,0 +1,278 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the XgemmStridedBatched class (see the header for information about the class).
+//
+// =================================================================================================
+
+#include "routines/levelx/xgemmstridedbatched.hpp"
+#include "routines/level3/xgemm.hpp"
+
+#include <string>
+#include <vector>
+
+namespace clblast {
+// =================================================================================================
+
+// Constructor: forwards to base class constructor
+template <typename T>
+XgemmStridedBatched<T>::XgemmStridedBatched(Queue &queue, EventPointer event, const std::string &name):
+ Routine(queue, event, name, {"Copy","Pad","Transpose","Padtranspose","Xgemm","XgemmDirect","GemmRoutine"},
+ PrecisionValue<T>(), {}, {
+ #include "../../kernels/level3/level3.opencl"
+ #include "../../kernels/level3/copy_fast.opencl"
+ #include "../../kernels/level3/copy_pad.opencl"
+ #include "../../kernels/level3/transpose_fast.opencl"
+ #include "../../kernels/level3/transpose_pad.opencl"
+ , // separated in multiple parts to prevent C1091 in MSVC 2013
+ #include "../../kernels/level3/xgemm_direct_part1.opencl"
+ #include "../../kernels/level3/xgemm_direct_part2.opencl"
+ #include "../../kernels/level3/xgemm_direct_part3.opencl"
+ , // separated in multiple parts to prevent C1091 in MSVC 2013
+ #include "../../kernels/level3/xgemm_part1.opencl"
+ #include "../../kernels/level3/xgemm_part2.opencl"
+ #include "../../kernels/level3/xgemm_part3.opencl"
+ #include "../../kernels/level3/xgemm_part4.opencl"
+ , // separated in multiple parts to prevent C1091 in MSVC 2013
+ #include "../../kernels/level3/xgemm_batched.opencl"
+ #include "../../kernels/level3/xgemm_direct_batched.opencl"
+ }) {
+}
+
+// =================================================================================================
+
+// The main routine
+template <typename T>
+void XgemmStridedBatched<T>::DoGemmStridedBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
+ const size_t m, const size_t n, const size_t k, const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+ const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count) {
+
+ // Tests for a valid batch count
+ if (batch_count < 1) {
+ throw BLASError(StatusCode::kInvalidBatchCount);
+ }
+
+ // Computes the transpose/conjugate options and sets the a/b/c sizes based on that
+ bool a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate;
+ size_t a_one, a_two, b_one, b_two, c_one, c_two;
+ Xgemm<T>::ProcessArguments(layout, a_transpose, b_transpose, m, n, k,
+ a_one, a_two, b_one, b_two, c_one, c_two,
+ a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate);
+
+ // Tests the matrices for validity
+ for (auto batch = size_t{0}; batch < batch_count; ++batch) {
+ TestMatrixA(a_one, a_two, a_buffer, a_offset + a_stride * batch, a_ld);
+ TestMatrixB(b_one, b_two, b_buffer, b_offset + b_stride * batch, b_ld);
+ TestMatrixC(c_one, c_two, c_buffer, c_offset + c_stride * batch, c_ld);
+ }
+
+ // Selects which version of the batched GEMM to run
+ const auto do_gemm_direct = true;
+ if (do_gemm_direct) { // single generic kernel
+ BatchedGemmDirect(m, n, k, alpha,
+ a_buffer, a_offset, a_ld, a_stride,
+ b_buffer, b_offset, b_ld, b_stride, beta,
+ c_buffer, c_offset, c_ld, c_stride,
+ a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate,
+ batch_count);
+ }
+ else { // pre/post-processing plus a very fast kernel
+ BatchedGemmIndirect(m, n, k, alpha,
+ a_buffer, a_offset, a_ld, a_stride,
+ b_buffer, b_offset, b_ld, b_stride, beta,
+ c_buffer, c_offset, c_ld, c_stride,
+ a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate,
+ a_one, a_two, b_one, b_two, c_one, c_two, batch_count);
+ }
+}
+
+
+// =================================================================================================
+
+// The indirect version of strided-batched GEMM. This uses the faster but non-general kernel. It
+// has specific requirements, but several pre and post-processing kernels take care of those.
+// However, the overhead of these extra kernels might not be ideal for certain devices/arguments.
+template <typename T>
+void XgemmStridedBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const size_t k, const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+ const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+ const bool a_conjugate, const bool b_conjugate,
+ const size_t a_one, const size_t a_two,
+ const size_t b_one, const size_t b_two,
+ const size_t c_one, const size_t c_two,
+ const size_t batch_count) {
+
+ // Calculates the ceiled versions of m, n, and k
+ const auto m_ceiled = Ceil(Ceil(m, db_["MWG"]), db_["VWM"]);
+ const auto n_ceiled = Ceil(Ceil(n, db_["NWG"]), db_["VWN"]);
+ const auto k_ceiled = Ceil(Ceil(k, db_["KWG"]), db_["VWM"]);
+
+ // Computes the first and second "internal" (ceiled) dimensions of the 3 matrices taking into account
+ // whether the matrices need to be rotated or not for the kernel.
+ size_t a_one_i, a_two_i, b_one_i, b_two_i, c_one_i, c_two_i;
+ Xgemm<T>::CalculateInternalDimensions(m, n, k, db_["MWG"], db_["NWG"], db_["KWG"],
+ a_one_i, a_two_i, b_one_i, b_two_i, c_one_i, c_two_i);
+
+ // Determines whether or not temporary matrices are needed
+ auto a_no_temp = a_one == a_one_i && a_two == a_two_i && a_ld == a_one && !a_do_transpose && !a_conjugate;
+ auto b_no_temp = b_one == b_one_i && b_two == b_two_i && b_ld == b_one && !b_do_transpose && !b_conjugate;
+ auto c_no_temp = c_one == c_one_i && c_two == c_two_i && c_ld == c_one && !c_do_transpose;
+
+ // Creates the temporary matrices
+ const auto a_temp = (a_no_temp) ? a_buffer : Buffer<T>(context_, batch_count * a_one_i * a_two_i);
+ const auto b_temp = (b_no_temp) ? b_buffer : Buffer<T>(context_, batch_count * b_one_i * b_two_i);
+ const auto c_temp = (c_no_temp) ? c_buffer : Buffer<T>(context_, batch_count * c_one_i * c_two_i);
+
+ // Events of all kernels (including pre/post processing kernels)
+ auto eventWaitList = std::vector<Event>();
+ auto emptyEventList = std::vector<Event>();
+
+ // Runs the pre-processing kernel for matrix A. This transposes the matrix, but also pads zeros
+ // to fill it up until it reaches a certain multiple of size (kernel parameter dependent). In
+ // case nothing has to be done, these kernels can be skipped.
+ if (!a_no_temp) {
+ auto eventProcessA = Event();
+ PadCopyTransposeMatrixStridedBatched(queue_, device_, db_, eventProcessA.pointer(), emptyEventList,
+ a_one, a_two, a_ld, a_offset, a_stride, a_buffer,
+ a_one_i, a_two_i, a_one_i, 0, a_one_i * a_two_i, a_temp,
+ program_, true, a_do_transpose, a_conjugate, batch_count);
+ eventWaitList.push_back(eventProcessA);
+ }
+
+ // As above, but now for matrix B
+ if (!b_no_temp) {
+ auto eventProcessB = Event();
+ PadCopyTransposeMatrixStridedBatched(queue_, device_, db_, eventProcessB.pointer(), emptyEventList,
+ b_one, b_two, b_ld, b_offset, b_stride, b_buffer,
+ b_one_i, b_two_i, b_one_i, 0, b_one_i * b_two_i, b_temp,
+ program_, true, b_do_transpose, b_conjugate, batch_count);
+ eventWaitList.push_back(eventProcessB);
+ }
+
+ // As above, but now for matrix C
+ if (!c_no_temp) {
+ auto eventProcessC = Event();
+ PadCopyTransposeMatrixStridedBatched(queue_, device_, db_, eventProcessC.pointer(), emptyEventList,
+ c_one, c_two, c_ld, c_offset, c_stride, c_buffer,
+ c_one_i, c_two_i, c_one_i, 0, c_one_i * c_two_i, c_temp,
+ program_, true, c_do_transpose, false, batch_count);
+ eventWaitList.push_back(eventProcessC);
+ }
+
+ // Retrieves the Xgemm kernel from the compiled binary
+ auto kernel = Kernel(program_, "XgemmStridedBatched");
+
+ // Sets the kernel arguments
+ kernel.SetArgument(0, static_cast<int>(m_ceiled));
+ kernel.SetArgument(1, static_cast<int>(n_ceiled));
+ kernel.SetArgument(2, static_cast<int>(k_ceiled));
+ kernel.SetArgument(3, GetRealArg(alpha));
+ kernel.SetArgument(4, GetRealArg(beta));
+ kernel.SetArgument(5, a_temp());
+ kernel.SetArgument(6, static_cast<int>(a_one_i));
+ kernel.SetArgument(7, static_cast<int>(a_two_i));
+ kernel.SetArgument(8, b_temp());
+ kernel.SetArgument(9, static_cast<int>(b_one_i));
+ kernel.SetArgument(10, static_cast<int>(b_two_i));
+ kernel.SetArgument(11, c_temp());
+ kernel.SetArgument(12, static_cast<int>(c_one_i));
+ kernel.SetArgument(13, static_cast<int>(c_two_i));
+
+ // Computes the global and local thread sizes
+ const auto global = std::vector<size_t>{
+ (c_one_i * db_["MDIMC"]) / db_["MWG"],
+ (c_two_i * db_["NDIMC"]) / db_["NWG"],
+ batch_count
+ };
+ const auto local = std::vector<size_t>{db_["MDIMC"], db_["NDIMC"], 1};
+
+ // Launches the kernel
+ auto eventKernel = Event();
+ auto eventPointer = eventKernel.pointer();
+ RunKernel(kernel, queue_, device_, global, local, eventPointer, eventWaitList);
+
+ // Runs the post-processing kernel if needed
+ if (!c_no_temp) {
+ eventWaitList.push_back(eventKernel);
+ PadCopyTransposeMatrixStridedBatched(queue_, device_, db_, event_, eventWaitList,
+ c_one_i, c_two_i, c_one_i, 0, c_one_i * c_two_i, c_temp,
+ c_one, c_two, c_ld, c_offset, c_stride, c_buffer,
+ program_, false, c_do_transpose, false, batch_count);
+ }
+}
+
+// =================================================================================================
+
+// The direct version of strided-batched GEMM: just one kernel, no pre or post-processing kernels.
+template <typename T>
+void XgemmStridedBatched<T>::BatchedGemmDirect(const size_t m, const size_t n, const size_t k, const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+ const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+ const bool a_conjugate, const bool b_conjugate,
+ const size_t batch_count) {
+
+ // Retrieves the proper XgemmDirect kernel from the compiled binary
+ const auto name = (a_do_transpose) ? (b_do_transpose ? "XgemmDirectStridedBatchedTT" : "XgemmDirectStridedBatchedTN") :
+ (b_do_transpose ? "XgemmDirectStridedBatchedNT" : "XgemmDirectStridedBatchedNN");
+ auto kernel = Kernel(program_, name);
+
+ // Sets the kernel arguments
+ kernel.SetArgument(0, static_cast<int>(m));
+ kernel.SetArgument(1, static_cast<int>(n));
+ kernel.SetArgument(2, static_cast<int>(k));
+ kernel.SetArgument(3, GetRealArg(alpha));
+ kernel.SetArgument(4, GetRealArg(beta));
+ kernel.SetArgument(5, a_buffer());
+ kernel.SetArgument(6, static_cast<int>(a_offset));
+ kernel.SetArgument(7, static_cast<int>(a_ld));
+ kernel.SetArgument(8, static_cast<int>(a_stride));
+ kernel.SetArgument(9, b_buffer());
+ kernel.SetArgument(10, static_cast<int>(b_offset));
+ kernel.SetArgument(11, static_cast<int>(b_ld));
+ kernel.SetArgument(12, static_cast<int>(b_stride));
+ kernel.SetArgument(13, c_buffer());
+ kernel.SetArgument(14, static_cast<int>(c_offset));
+ kernel.SetArgument(15, static_cast<int>(c_ld));
+ kernel.SetArgument(16, static_cast<int>(c_stride));
+ kernel.SetArgument(17, static_cast<int>(c_do_transpose));
+ kernel.SetArgument(18, static_cast<int>(a_conjugate));
+ kernel.SetArgument(19, static_cast<int>(b_conjugate));
+
+ // Computes the global and local thread sizes
+ const auto m_ceiled = Ceil(m, db_["WGD"]);
+ const auto n_ceiled = Ceil(n, db_["WGD"]);
+ const auto global = std::vector<size_t>{
+ (m_ceiled * db_["MDIMCD"]) / db_["WGD"],
+ (n_ceiled * db_["NDIMCD"]) / db_["WGD"],
+ batch_count
+ };
+ const auto local = std::vector<size_t>{db_["MDIMCD"], db_["NDIMCD"], 1};
+
+ // Launches the kernel
+ RunKernel(kernel, queue_, device_, global, local, event_);
+}
+
+// =================================================================================================
+
+// Compiles the templated class
+template class XgemmStridedBatched<half>;
+template class XgemmStridedBatched<float>;
+template class XgemmStridedBatched<double>;
+template class XgemmStridedBatched<float2>;
+template class XgemmStridedBatched<double2>;
+
+// =================================================================================================
+} // namespace clblast
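
A note on the indirect path above (an orientation sketch, not part of the diff): when temporaries are needed, each batch's padded a_one_i x a_two_i matrix is written back-to-back with leading dimension a_one_i and an internal stride of a_one_i * a_two_i, which is exactly the offset the XgemmStridedBatched kernel reconstructs as batch * a_one * a_two. The bookkeeping, with illustrative names:

// Sketch of the temporary-buffer layout assumed by BatchedGemmIndirect and the XgemmStridedBatched kernel.
#include <cstddef>

struct PackedTempLayout {
  size_t ld;            // leading dimension of each padded matrix (= one_i)
  size_t batch_stride;  // elements between consecutive batches (= one_i * two_i)
  size_t total_size;    // elements to allocate for all batches (matches the Buffer<T> sizes above)
};

PackedTempLayout MakePackedTempLayout(const size_t one_i, const size_t two_i, const size_t batch_count) {
  PackedTempLayout layout;
  layout.ld = one_i;
  layout.batch_stride = one_i * two_i;                    // "batch * a_one * a_two" in the kernel
  layout.total_size = batch_count * layout.batch_stride;
  return layout;
}
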
diff --git a/src/routines/levelx/xgemmstridedbatched.hpp b/src/routines/levelx/xgemmstridedbatched.hpp
new file mode 100644
index 00000000..0dbbcb10
--- /dev/null
+++ b/src/routines/levelx/xgemmstridedbatched.hpp
@@ -0,0 +1,66 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the XgemmStridedBatched routine, a non-BLAS strided-batched version of GEMM.
+//
+// =================================================================================================
+
+#ifndef CLBLAST_ROUTINES_XGEMMSTRIDEDBATCHED_H_
+#define CLBLAST_ROUTINES_XGEMMSTRIDEDBATCHED_H_
+
+#include <vector>
+
+#include "routine.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// See comment at top of file for a description of the class
+template <typename T>
+class XgemmStridedBatched: public Routine {
+public:
+
+ // Constructor
+ XgemmStridedBatched(Queue &queue, EventPointer event, const std::string &name = "GEMMSTRIDEDBATCHED");
+
+ // Templated-precision implementation of the routine
+ void DoGemmStridedBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
+ const size_t m, const size_t n, const size_t k, const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+ const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const size_t batch_count);
+
+ // Indirect version of strided batched GEMM (with pre and post-processing kernels)
+ void BatchedGemmIndirect(const size_t m, const size_t n, const size_t k, const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+ const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+ const bool a_conjugate, const bool b_conjugate,
+ const size_t a_one, const size_t a_two,
+ const size_t b_one, const size_t b_two,
+ const size_t c_one, const size_t c_two,
+ const size_t batch_count);
+
+ // Direct version of strided batched GEMM (no pre and post-processing kernels)
+ void BatchedGemmDirect(const size_t m, const size_t n, const size_t k, const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+ const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+ const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+ const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+ const bool a_conjugate, const bool b_conjugate,
+ const size_t batch_count);
+};
+
+// =================================================================================================
+} // namespace clblast
+
+// CLBLAST_ROUTINES_XGEMMSTRIDEDBATCHED_H_
+#endif
diff --git a/src/routines/routines.hpp b/src/routines/routines.hpp
index 9e7768b9..0aeff707 100644
--- a/src/routines/routines.hpp
+++ b/src/routines/routines.hpp
@@ -71,6 +71,7 @@
#include "routines/levelx/xim2col.hpp"
#include "routines/levelx/xaxpybatched.hpp"
#include "routines/levelx/xgemmbatched.hpp"
+#include "routines/levelx/xgemmstridedbatched.hpp"
// CLBLAST_ROUTINES_ROUTINES_H_
#endif