summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorCedric Nugteren <web@cedricnugteren.nl>2018-10-14 17:27:45 +0200
committerGitHub <noreply@github.com>2018-10-14 17:27:45 +0200
commit634b2bc75c74b80cdd2c60f87472d6f8f467cbba (patch)
tree7b37028db7736a1c872fe813ee93a438784706c3 /src
parentff7bee93d375b3cf29603b2113fda95a3b920aba (diff)
parent83ba3d4b7ba3a9cb5fbd2c1ad2bb14b2addd39fb (diff)
Merge pull request #319 from CNugteren/convgemm_multi_kernel
First im2col+GEMM implementation of convolution
Diffstat (limited to 'src')
-rw-r--r--src/clblast.cpp33
-rw-r--r--src/clblast_c.cpp47
-rw-r--r--src/clblast_cuda.cpp35
-rw-r--r--src/kernels/levelx/xconvgemm_part1.opencl113
-rw-r--r--src/kernels/levelx/xconvgemm_part2.opencl281
-rw-r--r--src/routines/levelx/xconvgemm.cpp180
-rw-r--r--src/routines/levelx/xconvgemm.hpp53
-rw-r--r--src/routines/routines.hpp1
-rw-r--r--src/utilities/utilities.hpp4
9 files changed, 746 insertions, 1 deletion
diff --git a/src/clblast.cpp b/src/clblast.cpp
index 10bb8cba..0cd2f843 100644
--- a/src/clblast.cpp
+++ b/src/clblast.cpp
@@ -2252,6 +2252,39 @@ template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const si
cl_mem, const size_t,
cl_command_queue*, cl_event*);
+// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/HCONVGEMM
+template <typename T>
+StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+ const cl_mem im_buffer, const size_t im_offset,
+ const cl_mem kernel_buffer, const size_t kernel_offset,
+ cl_mem result_buffer, const size_t result_offset,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ auto queue_cpp = Queue(*queue);
+ auto routine = Xconvgemm<T>(queue_cpp, event);
+ routine.DoConvgemm(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+ Buffer<T>(im_buffer), im_offset,
+ Buffer<T>(kernel_buffer), kernel_offset,
+ Buffer<T>(result_buffer), result_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Convgemm<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const cl_mem, const size_t,
+ const cl_mem, const size_t,
+ cl_mem, const size_t,
+ cl_command_queue*, cl_event*);
+template StatusCode PUBLIC_API Convgemm<double>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const cl_mem, const size_t,
+ const cl_mem, const size_t,
+ cl_mem, const size_t,
+ cl_command_queue*, cl_event*);
+template StatusCode PUBLIC_API Convgemm<half>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const cl_mem, const size_t,
+ const cl_mem, const size_t,
+ cl_mem, const size_t,
+ cl_command_queue*, cl_event*);
+
// Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED
template <typename T>
StatusCode AxpyBatched(const size_t n,
diff --git a/src/clblast_c.cpp b/src/clblast_c.cpp
index 06a5fc67..72adb888 100644
--- a/src/clblast_c.cpp
+++ b/src/clblast_c.cpp
@@ -3679,6 +3679,53 @@ CLBlastStatusCode CLBlastHim2col(const size_t channels, const size_t height, con
} catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
}
+// CONVGEMM
+CLBlastStatusCode CLBlastSconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+ const cl_mem im_buffer, const size_t im_offset,
+ const cl_mem kernel_buffer, const size_t kernel_offset,
+ cl_mem result_buffer, const size_t result_offset,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ return static_cast<CLBlastStatusCode>(
+ clblast::Convgemm<float>(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+ im_buffer, im_offset,
+ kernel_buffer, kernel_offset,
+ result_buffer, result_offset,
+ queue, event)
+ );
+ } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
+}
+CLBlastStatusCode CLBlastDconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+ const cl_mem im_buffer, const size_t im_offset,
+ const cl_mem kernel_buffer, const size_t kernel_offset,
+ cl_mem result_buffer, const size_t result_offset,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ return static_cast<CLBlastStatusCode>(
+ clblast::Convgemm<double>(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+ im_buffer, im_offset,
+ kernel_buffer, kernel_offset,
+ result_buffer, result_offset,
+ queue, event)
+ );
+ } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
+}
+CLBlastStatusCode CLBlastHconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+ const cl_mem im_buffer, const size_t im_offset,
+ const cl_mem kernel_buffer, const size_t kernel_offset,
+ cl_mem result_buffer, const size_t result_offset,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ return static_cast<CLBlastStatusCode>(
+ clblast::Convgemm<half>(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+ im_buffer, im_offset,
+ kernel_buffer, kernel_offset,
+ result_buffer, result_offset,
+ queue, event)
+ );
+ } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); }
+}
+
// AXPY
CLBlastStatusCode CLBlastSaxpyBatched(const size_t n,
const float *alphas,
diff --git a/src/clblast_cuda.cpp b/src/clblast_cuda.cpp
index 8927014b..f14806cb 100644
--- a/src/clblast_cuda.cpp
+++ b/src/clblast_cuda.cpp
@@ -2350,6 +2350,41 @@ template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const si
CUdeviceptr, const size_t,
const CUcontext, const CUdevice);
+// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/HCONVGEMM
+template <typename T>
+StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+ const CUdeviceptr im_buffer, const size_t im_offset,
+ const CUdeviceptr kernel_buffer, const size_t kernel_offset,
+ CUdeviceptr result_buffer, const size_t result_offset,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xconvgemm<T>(queue_cpp, nullptr);
+ routine.DoConvgemm(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+ Buffer<T>(im_buffer), im_offset,
+ Buffer<T>(kernel_buffer), kernel_offset,
+ Buffer<T>(result_buffer), result_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
+}
+template StatusCode PUBLIC_API Convgemm<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Convgemm<double>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+template StatusCode PUBLIC_API Convgemm<half>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
+ const CUdeviceptr, const size_t,
+ const CUdeviceptr, const size_t,
+ CUdeviceptr, const size_t,
+ const CUcontext, const CUdevice);
+
// Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED
template <typename T>
StatusCode AxpyBatched(const size_t n,
diff --git a/src/kernels/levelx/xconvgemm_part1.opencl b/src/kernels/levelx/xconvgemm_part1.opencl
new file mode 100644
index 00000000..abdb5324
--- /dev/null
+++ b/src/kernels/levelx/xconvgemm_part1.opencl
@@ -0,0 +1,113 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file contains an implementation of 3D convolution on a 4D image using GEMM kernels. It
+// uses parameters from the direct GEMM kernel. This is the part with the loads from memory (1/2).
+// This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running
+// the im2col kernel to create a 'col' temporary matrix.
+// TODO: Currently only works with 'CONVGEMM_WITH_IM2COL' set
+//
+// =================================================================================================
+
+// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
+// literal). Comment-out this line for syntax-highlighting when developing.
+R"(
+
+// =================================================================================================
+#if defined(ROUTINE_CONVGEMM) && !defined(CONVGEMM_WITH_IM2COL)
+
+// Loads global off-chip memory into thread-private register files. This function is specific for
+// loading the image input tensor. This includes a bounds check.
+INLINE_FUNC real GlobalToPrivateCheckedImage(const __global real* restrict imagegm, const int image_offset_batch,
+ const int h_id, const int w_id, const int kwg,
+ const int input_h, const int input_w, const int channels,
+ const int kernel_h, const int kernel_w,
+ const int pad_h, const int pad_w,
+ const int stride_h, const int stride_w,
+ const int dilation_h, const int dilation_w) {
+
+ // Im2col indices
+ const int kernel_2d_index = kwg % (kernel_h * kernel_w);
+ const int kw_id = kernel_2d_index % kernel_w;
+ const int kh_id = kernel_2d_index / kernel_w;
+ const int c_id = kwg / (kernel_h * kernel_w);
+ const int h_index = -pad_h + kh_id * dilation_h + stride_h * h_id;
+ const int w_index = -pad_w + kw_id * dilation_w + stride_w * w_id;
+
+ // With bounds check
+ real result;
+ if (h_index >= 0 && h_index < input_h &&
+ w_index >= 0 && w_index < input_w) {
+ const int image_index = w_index + input_w * (h_index + input_h * c_id);
+ result = imagegm[image_index + image_offset_batch];
+ }
+ else {
+ SetToZero(result);
+ }
+ return result;
+}
+
+// Loads global off-chip memory into local (shared) memory on-chip. This function is specific for
+// loading the image input tensor. This includes a bounds check.
+INLINE_FUNC real GlobalToLocalCheckedImage(const __global realMD* restrict imagegm, LOCAL_PTR real* alm,
+ const int image_offset_batch,
+ const int h_id, const int w_id, const int kwg,
+ const int input_h, const int input_w, const int channels,
+ const int kernel_h, const int kernel_w,
+ const int pad_h, const int pad_w,
+ const int stride_h, const int stride_w,
+ const int dilation_h, const int dilation_w) {
+ #if MDIMCD == MDIMAD
+ const int la0 = get_local_id(0);
+ const int la1 = get_local_id(1);
+ #else
+ const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
+ const int la0 = tid % MDIMAD;
+ const int la1 = tid / MDIMAD;
+ #endif
+ #pragma unroll
+ for (int _mia = 0; _mia < MWAD; _mia += 1) {
+ #pragma unroll
+ for (int _kia = 0; _kia < KWAD; _kia += 1) {
+
+ // Computes the indices for the global memory
+ int mg = _mia + la0*MWAD;
+ int kg = _kia + la1*KWAD;
+ int idm = mg + GetGroupID0()*WGD;
+ int idk = kg + kwg;
+
+ // Im2col indices
+ const int kernel_2d_index = idk % (kernel_h * kernel_w);
+ const int kw_id = kernel_2d_index % kernel_w;
+ const int kh_id = kernel_2d_index / kernel_w;
+ const int c_id = idk / (kernel_h * kernel_w);
+ const int h_index = -pad_h + kh_id * dilation_h + stride_h * h_id;
+ const int w_index = -pad_w + kw_id * dilation_w + stride_w * w_id;
+
+ // Loads the data from global memory into the local memory
+ if (h_index >= 0 && h_index < input_h &&
+ w_index >= 0 && w_index < input_w) {
+ const int image_index = w_index + input_w * (h_index + input_h * c_id);
+ const real result = imagegm[image_index + image_offset_batch];
+ alm[kg*(WGD + PADA) + mg] = result;
+ }
+ else {
+ SetToZero(alm[kg*(WGD + PADA) + mg]);
+ }
+ }
+ }
+}
+
+#endif
+// =================================================================================================
+
+// End of the C++11 raw string literal
+)"
+
+// =================================================================================================
diff --git a/src/kernels/levelx/xconvgemm_part2.opencl b/src/kernels/levelx/xconvgemm_part2.opencl
new file mode 100644
index 00000000..e0ac24a0
--- /dev/null
+++ b/src/kernels/levelx/xconvgemm_part2.opencl
@@ -0,0 +1,281 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file contains an implementation of 3D convolution on a 4D image using GEMM kernels. It
+// uses parameters from the direct GEMM kernel. This part contains the main kernel (2/2).
+// This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running
+// the im2col kernel to create a 'col' temporary matrix.
+// TODO: Currently only works with 'CONVGEMM_WITH_IM2COL' set
+//
+// =================================================================================================
+
+// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
+// literal). Comment-out this line for syntax-highlighting when developing.
+R"(
+
+// =================================================================================================
+#if defined(ROUTINE_CONVGEMM)
+
+// ConvGEMM kernel
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void Xconvgemm(const int num_patches, const int num_kernels, const int patch_size,
+ const __global realND* restrict kernelgm, const int kernel_offset,
+ __global real* resultgm, const int result_offset, const int result_stride,
+#if defined(CONVGEMM_WITH_IM2COL)
+ const __global realMD* restrict colgm, const int col_offset, const int col_stride)
+#else
+ const __global realMD* restrict imagegm, const int image_offset,
+ const int input_h, const int input_w, const int channels,
+ const int kernel_h, const int kernel_w,
+ const int pad_h, const int pad_w,
+ const int stride_h, const int stride_w,
+ const int dilation_h, const int dilation_w,
+ const int output_h, const int output_w)
+#endif
+{
+
+ // Batch offsets
+ const int batch = get_group_id(2);
+ #if defined(CONVGEMM_WITH_IM2COL)
+ const int col_offset_batch = col_offset + col_stride * batch;
+ #else
+ const int image_offset_batch = image_offset + channels * input_h * input_w * batch;
+ #endif
+ const int result_offset_batch = result_offset + result_stride * batch;
+
+ __local real alm[WGD * (WGD + PADA)];
+ __local real blm[WGD * (WGD + PADB)];
+
+ // Extra pointers to scalar versions of global memory
+ #if defined(CONVGEMM_WITH_IM2COL)
+ const __global real* restrict colgms = (const __global real* restrict) colgm;
+ #endif
+ const __global real* restrict kernelgms = (const __global real* restrict) kernelgm;
+
+ // Allocates workitem-private memory (registers)
+ #pragma promote_to_registers
+ real apd[MWID];
+ #pragma promote_to_registers
+ real bpd[NWID];
+ #pragma promote_to_registers
+ real cpd[NWID * MWID];
+
+ // Initializes the accumulation registers
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ SetToZero(cpd[_ni * MWID + _mi]);
+ }
+ }
+
+ // Global m/n indices
+ const int idm = get_local_id(0) * MWID + GetGroupID0() * WGD;
+ const int idn = get_local_id(1) * NWID + GetGroupID1() * WGD;
+ #if !defined(CONVGEMM_WITH_IM2COL)
+ const int w_id = idm % output_w;
+ const int h_id = idm / output_w;
+ #endif
+
+ // The faster version of GEMM is not allowed on the (incomplete) borders. Therefore, this section
+ // processes only the main parts: output blocks of WGD by WGD.
+ if ((idm < (num_patches/WGD)*WGD) && (idn < (num_kernels/WGD)*WGD)) {
+
+ // Loops over all complete workgroup tiles (K-dimension)
+ int kwg = 0;
+ for (; kwg < (patch_size/WGD) * WGD; kwg += WGD) {
+
+ // Loads data: off-chip --> local (matrix A and B)
+ #if defined(CONVGEMM_WITH_IM2COL)
+ if (num_patches % VWMD == 0 && col_offset_batch % VWMD == 0) {
+ GlobalToLocalDirectA(colgm, alm, num_patches, col_offset_batch, kwg, false, false);
+ }
+ else {
+ GlobalToLocalScalarA(colgms, alm, num_patches, col_offset_batch, kwg, false, false);
+ }
+ #else
+ GlobalToLocalCheckedImage(imagegm, alm, image_offset_batch, h_id, w_id, kwg,
+ input_h, input_w, channels, kernel_h, kernel_w,
+ pad_h, pad_w, stride_h, stride_w,
+ dilation_h, dilation_w);
+ #endif
+ if (patch_size % VWND == 0 && kernel_offset % VWND == 0) {
+ GlobalToLocalDirectB(kernelgm, blm, patch_size, kernel_offset, kwg, true, false);
+ }
+ else {
+ GlobalToLocalScalarB(kernelgms, blm, patch_size, kernel_offset, kwg, true, false);
+ }
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ // Loops over all workitem tiles, unrolled by a factor KWID
+ for (int pwi = 0; pwi < WGD; pwi += KWID) {
+ #pragma unroll
+ for (int _pit = 0; _pit < KWID; _pit += 1) {
+ int kg = pwi + _pit;
+
+ // Loads data: local --> private (matrix A and B)
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ apd[_mi] = LocalToPrivateDirectA(alm, _mi, kg, false);
+ }
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ bpd[_ni] = LocalToPrivateDirectB(blm, _ni, kg, true);
+ }
+
+ // Performs the accumulation (Cpmd += Apmd * Bpmd)
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ MultiplyAdd(cpd[_ni * MWID + _mi], apd[_mi], bpd[_ni]);
+ }
+ }
+ }
+ }
+ barrier(CLK_LOCAL_MEM_FENCE);
+ }
+
+ // Loop over the remaining part (incomplete tile in K-dimension)
+ for (; kwg < patch_size; ++kwg) {
+
+ // Loads data: off-chip --> private (matrix A and B)
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ #if defined(CONVGEMM_WITH_IM2COL)
+ apd[_mi] = GlobalToPrivateDirectA(colgms, _mi, num_patches, col_offset_batch, idm, kwg, false, false);
+ #else
+ apd[_mi] = GlobalToPrivateCheckedImage(imagegm, image_offset_batch, h_id, w_id, kwg,
+ input_h, input_w, channels, kernel_h, kernel_w,
+ pad_h, pad_w, stride_h, stride_w,
+ dilation_h, dilation_w);
+ #endif
+ }
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ bpd[_ni] = GlobalToPrivateDirectB(kernelgms, _ni, patch_size, kernel_offset, idn, kwg, true, false);
+ }
+
+ // Performs the accumulation (Cpmd += Apmd * Bpmd)
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ MultiplyAdd(cpd[_ni * MWID + _mi], apd[_mi], bpd[_ni]);
+ }
+ }
+ }
+
+ // Stores a tile of results
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ StoreResultsDirect(resultgm, cpd[_ni * MWID + _mi], _mi, _ni, idm, idn,
+ ONE, ZERO, num_patches, result_offset_batch, false);
+ }
+ }
+ }
+
+ // Simple but slower version for the parts on the edge (incomplete tiles in M and N-dimensions)
+ else {
+ // Loops over all complete workgroup tiles (K-dimension)
+ int kwg = 0;
+ for (; kwg < (patch_size/WGD) * WGD; kwg+=WGD) {
+
+ // Loads data: off-chip --> local
+ #if defined(CONVGEMM_WITH_IM2COL)
+ GlobalToLocalCheckedA(colgms, alm, num_patches, col_offset_batch, kwg, false, false, num_patches, patch_size);
+ #else
+ GlobalToLocalCheckedImage(imagegm, alm, image_offset_batch, h_id, w_id, kwg,
+ input_h, input_w, channels, kernel_h, kernel_w,
+ pad_h, pad_w, stride_h, stride_w,
+ dilation_h, dilation_w);
+ #endif
+ GlobalToLocalCheckedB(kernelgms, blm, patch_size, kernel_offset, kwg, true, false, num_kernels, patch_size);
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ // Loops over all workitem tiles, unrolled by a factor KWID
+ for (int pwi = 0; pwi < WGD; pwi += KWID) {
+ #pragma unroll
+ for (int _pit = 0; _pit < KWID; _pit += 1) {
+ int kg = pwi + _pit;
+
+ // Loads data: local --> private
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ apd[_mi] = LocalToPrivateDirectA(alm, _mi, kg, false);
+ }
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ bpd[_ni] = LocalToPrivateDirectB(blm, _ni, kg, true);
+ }
+
+ // Performs the accumulation (C += A * B)
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ MultiplyAdd(cpd[_ni * MWID + _mi], apd[_mi], bpd[_ni]);
+ }
+ }
+ }
+ }
+ barrier(CLK_LOCAL_MEM_FENCE);
+ }
+
+ // Loop over the remaining part (incomplete tile in K-dimension)
+ for (; kwg < patch_size; ++kwg) {
+
+ // Loads data: off-chip --> private
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ #if defined(CONVGEMM_WITH_IM2COL)
+ apd[_mi] = GlobalToPrivateCheckedA(colgms, _mi, num_patches, col_offset_batch, idm, kwg, false, false, num_patches);
+ #else
+ apd[_mi] = GlobalToPrivateCheckedImage(imagegm, image_offset_batch, h_id, w_id, kwg,
+ input_h, input_w, channels, kernel_h, kernel_w,
+ pad_h, pad_w, stride_h, stride_w,
+ dilation_h, dilation_w);
+ #endif
+ }
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ bpd[_ni] = GlobalToPrivateCheckedB(kernelgms, _ni, patch_size, kernel_offset, idn, kwg, true, false, num_kernels);
+ }
+
+ // Performs the accumulation (C += A * B)
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ MultiplyAdd(cpd[_ni * MWID + _mi], apd[_mi], bpd[_ni]);
+ }
+ }
+ }
+
+ // Stores a tile of results
+ #pragma unroll
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ #pragma unroll
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ StoreResultsChecked(resultgm, cpd[_ni * MWID + _mi], _mi, _ni, idm, idn, num_patches, num_kernels,
+ ONE, ZERO, num_patches, result_offset_batch, false);
+ }
+ }
+ }
+}
+
+#endif
+// =================================================================================================
+
+// End of the C++11 raw string literal
+)"
+
+// =================================================================================================
diff --git a/src/routines/levelx/xconvgemm.cpp b/src/routines/levelx/xconvgemm.cpp
new file mode 100644
index 00000000..f26f23a7
--- /dev/null
+++ b/src/routines/levelx/xconvgemm.cpp
@@ -0,0 +1,180 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the Xconvgemm class (see the header for information about the class).
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+#include <assert.h>
+
+#include "routines/levelx/xconvgemm.hpp"
+#include "routines/levelx/xim2col.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Constructor: forwards to base class constructor
+template <typename T>
+Xconvgemm<T>::Xconvgemm(Queue &queue, EventPointer event, const std::string &name,
+ const ConvGemmMethod method):
+ Routine(queue, event, name, {"XgemmDirect"},
+ PrecisionValue<T>(), {}, {
+ (method == ConvGemmMethod::kWithIm2Col) ? "#define CONVGEMM_WITH_IM2COL\n" : "",
+ #include "../../kernels/level3/level3.opencl"
+ , // separated in multiple parts to prevent C1091 in MSVC 2013
+ #include "../../kernels/level3/xgemm_direct_part1.opencl"
+ #include "../../kernels/level3/xgemm_direct_part2.opencl"
+ #include "../../kernels/level3/xgemm_direct_part3.opencl"
+ , // separated in multiple parts to prevent C1091 in MSVC 2013
+ #include "../../kernels/levelx/xconvgemm_part1.opencl"
+ #include "../../kernels/levelx/xconvgemm_part2.opencl"
+ }),
+ method_(method) {
+}
+
+// =================================================================================================
+
+template <typename T>
+void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const size_t width,
+ const size_t kernel_h, const size_t kernel_w, const size_t pad_h,
+ const size_t pad_w, const size_t stride_h, const size_t stride_w,
+ const size_t dilation_h, const size_t dilation_w,
+ const size_t num_kernels, const size_t batch_count,
+ const Buffer<T> &im_buffer, const size_t im_offset,
+ const Buffer<T> &kernel_buffer, const size_t kernel_offset,
+ const Buffer<T> &result_buffer, const size_t result_offset) {
+
+ // TODO: Implement single-kernel approach
+ assert(method_ == ConvGemmMethod::kWithIm2Col);
+
+ // Tests for a valid batch count
+ if (batch_count == 0) {
+ throw BLASError(StatusCode::kInvalidBatchCount);
+ }
+
+ // Makes sure all dimensions are larger than zero
+ if ((channels == 0) || (height == 0) || (width == 0) || (num_kernels == 0)) {
+ throw BLASError(StatusCode::kInvalidDimension);
+ }
+
+ // Sets the output height and width
+ const auto size_h = height + 2 * pad_h;
+ const auto padding_h = dilation_h * (kernel_h - 1) + 1;
+ const auto output_h = (size_h >= padding_h) ? (size_h - padding_h) / stride_h + 1 : 1;
+ const auto size_w = width + 2 * pad_w;
+ const auto padding_w = dilation_w * (kernel_w - 1) + 1;
+ const auto output_w = (size_w >= padding_w) ? (size_w - padding_w) / stride_w + 1 : 1;
+
+ // Sets other useful variables
+ const auto patch_size = kernel_h * kernel_w * channels;
+ const auto num_patches = output_h * output_w;
+
+ // Possible approach: im2col + GEMM
+ // result = GEMM(im2col(image), kernel)
+ auto col_buffer = Buffer<T>(context_, 0); // nullptr, will be optionally created later
+ if (method_ == ConvGemmMethod::kWithIm2Col) {
+
+ // Temporary col matrix
+ const auto col_size = (method_ == ConvGemmMethod::kWithIm2Col) ? patch_size * num_patches * batch_count : 1;
+ col_buffer = Buffer<T>(context_, col_size);
+
+ // Loops over each batch
+ for (auto batch_id = size_t{0}; batch_id < batch_count; ++batch_id) {
+
+ // im2col
+ const auto im_batch_offset = batch_id * channels * height * width + im_offset;
+ const auto col_batch_offset = batch_id * patch_size * num_patches;
+ auto im2col_event = Event();
+ auto im2col = Xim2col<T>(queue_, im2col_event.pointer());
+ im2col.DoIm2col(channels, height, width, kernel_h, kernel_w,
+ pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
+ im_buffer, im_batch_offset,
+ col_buffer, col_batch_offset);
+ im2col_event.WaitForCompletion();
+ }
+ }
+
+ // Strided batched GEMM: C (result) = alpha (1) * A (col) * B (kernel) + beta (0) * C (result)
+ const auto col_stride = patch_size * num_patches;
+ const auto result_stride = num_kernels * output_h * output_w;
+
+ // Tests the matrices for validity
+ TestMatrixB(patch_size, num_kernels, kernel_buffer, kernel_offset, patch_size);
+ for (auto batch = size_t{0}; batch < batch_count; ++batch) {
+ if (method_ == ConvGemmMethod::kWithIm2Col) {
+ TestMatrixA(num_patches, patch_size, col_buffer, col_stride * batch, num_patches);
+ }
+ else {
+ // TODO: check for valid image tensor
+ }
+ TestMatrixC(num_patches, num_kernels, result_buffer, result_offset + result_stride * batch, num_patches);
+ }
+
+  // Retrieves the proper Xconvgemm kernel from the compiled binary
+ auto kernel = Kernel(program_, "Xconvgemm");
+
+ // Sets the kernel arguments
+ kernel.SetArgument(0, static_cast<int>(num_patches));
+ kernel.SetArgument(1, static_cast<int>(num_kernels));
+ kernel.SetArgument(2, static_cast<int>(patch_size));
+ kernel.SetArgument(3, kernel_buffer());
+ kernel.SetArgument(4, static_cast<int>(kernel_offset));
+ kernel.SetArgument(5, result_buffer());
+ kernel.SetArgument(6, static_cast<int>(result_offset));
+ kernel.SetArgument(7, static_cast<int>(result_stride));
+ if (method_ == ConvGemmMethod::kWithIm2Col) {
+ kernel.SetArgument(8, col_buffer());
+ kernel.SetArgument(9, static_cast<int>(0));
+ kernel.SetArgument(10, static_cast<int>(col_stride));
+ }
+ if (method_ == ConvGemmMethod::kSingleKernel) {
+ kernel.SetArgument(8, im_buffer());
+ kernel.SetArgument(9, static_cast<int>(im_offset));
+ kernel.SetArgument(10, static_cast<int>(height));
+ kernel.SetArgument(11, static_cast<int>(width));
+ kernel.SetArgument(12, static_cast<int>(channels));
+ kernel.SetArgument(13, static_cast<int>(kernel_h));
+ kernel.SetArgument(14, static_cast<int>(kernel_w));
+ kernel.SetArgument(15, static_cast<int>(pad_h));
+ kernel.SetArgument(16, static_cast<int>(pad_w));
+ kernel.SetArgument(17, static_cast<int>(stride_h));
+ kernel.SetArgument(18, static_cast<int>(stride_w));
+ kernel.SetArgument(19, static_cast<int>(dilation_h));
+ kernel.SetArgument(20, static_cast<int>(dilation_w));
+ kernel.SetArgument(21, static_cast<int>(output_h));
+ kernel.SetArgument(22, static_cast<int>(output_w));
+ }
+
+ // Computes the global and local thread sizes
+ const auto m_ceiled = Ceil(num_patches, db_["WGD"]);
+ const auto n_ceiled = Ceil(num_kernels, db_["WGD"]);
+ const auto global = std::vector<size_t>{
+ (m_ceiled * db_["MDIMCD"]) / db_["WGD"],
+ (n_ceiled * db_["NDIMCD"]) / db_["WGD"],
+ batch_count
+ };
+ const auto local = std::vector<size_t>{db_["MDIMCD"], db_["NDIMCD"], 1};
+
+ // Launches the kernel
+ RunKernel(kernel, queue_, device_, global, local, event_);
+}
+
+// =================================================================================================
+
+// Compiles the templated class
+template class Xconvgemm<half>;
+template class Xconvgemm<float>;
+template class Xconvgemm<double>;
+template class Xconvgemm<float2>;
+template class Xconvgemm<double2>;
+
+// =================================================================================================
+} // namespace clblast
diff --git a/src/routines/levelx/xconvgemm.hpp b/src/routines/levelx/xconvgemm.hpp
new file mode 100644
index 00000000..9d11ccee
--- /dev/null
+++ b/src/routines/levelx/xconvgemm.hpp
@@ -0,0 +1,53 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the Xconvgemm routine. The precision is implemented as a template argument.
+// This implements batched convolution of a 4D input 'image' tensor, a 3D input 'kernel' matrix,
+// resulting in a 4D output 'result' tensor.
+//
+// =================================================================================================
+
+#ifndef CLBLAST_ROUTINES_XCONVGEMM_H_
+#define CLBLAST_ROUTINES_XCONVGEMM_H_
+
+#include "routine.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// See comment at top of file for a description of the class
+template <typename T>
+class Xconvgemm: public Routine {
+ public:
+
+ // Constructor
+ enum class ConvGemmMethod {kWithIm2Col, kSingleKernel};
+ Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM",
+ const ConvGemmMethod method = ConvGemmMethod::kWithIm2Col);
+
+ // Templated-precision implementation of the routine
+ void DoConvgemm(const size_t channels, const size_t height, const size_t width,
+ const size_t kernel_h, const size_t kernel_w,
+ const size_t pad_h, const size_t pad_w,
+ const size_t stride_h, const size_t stride_w,
+ const size_t dilation_h, const size_t dilation_w,
+ const size_t num_kernels, const size_t batch_count,
+ const Buffer<T> &im_buffer, const size_t im_offset,
+ const Buffer<T> &kernel_buffer, const size_t kernel_offset,
+ const Buffer<T> &result_buffer, const size_t result_offset);
+
+ private:
+ const ConvGemmMethod method_;
+};
+
+// =================================================================================================
+} // namespace clblast
+
+// CLBLAST_ROUTINES_XCONVGEMM_H_
+#endif
diff --git a/src/routines/routines.hpp b/src/routines/routines.hpp
index 2ab16a75..e080ed47 100644
--- a/src/routines/routines.hpp
+++ b/src/routines/routines.hpp
@@ -70,6 +70,7 @@
#include "routines/levelx/xhad.hpp"
#include "routines/levelx/xomatcopy.hpp"
#include "routines/levelx/xim2col.hpp"
+#include "routines/levelx/xconvgemm.hpp"
#include "routines/levelx/xaxpybatched.hpp"
#include "routines/levelx/xgemmbatched.hpp"
#include "routines/levelx/xgemmstridedbatched.hpp"
diff --git a/src/utilities/utilities.hpp b/src/utilities/utilities.hpp
index 37d71794..16a241af 100644
--- a/src/utilities/utilities.hpp
+++ b/src/utilities/utilities.hpp
@@ -87,6 +87,7 @@ constexpr auto kArgImaxOffset = "offimax";
constexpr auto kArgAlpha = "alpha";
constexpr auto kArgBeta = "beta";
constexpr auto kArgBatchCount = "batch_num";
+constexpr auto kArgNumKernels = "num_kernels";
// Constants for im2col
constexpr auto kArgChannels = "channels";
@@ -199,7 +200,7 @@ struct Arguments {
size_t imax_offset = 0;
T alpha = ConstantOne<T>();
T beta = ConstantOne<T>();
- // Arguments for im2col
+ // Arguments for im2col and convgemm
size_t channels = 1;
size_t height = 1;
size_t width = 1;
@@ -211,6 +212,7 @@ struct Arguments {
size_t stride_w = 1;
size_t dilation_h = 1;
size_t dilation_w = 1;
+ size_t num_kernels = 1;
// Batch-specific arguments
size_t batch_count = 1;
std::vector<size_t> x_offsets; // = {0};