diff options
-rw-r--r-- | CHANGELOG | 2 | ||||
-rw-r--r-- | CMakeLists.txt | 2 | ||||
-rw-r--r-- | doc/api.md | 68 | ||||
-rw-r--r-- | doc/routines.md | 3 | ||||
-rw-r--r-- | include/clblast.h | 8 | ||||
-rw-r--r-- | include/clblast_c.h | 17 | ||||
-rw-r--r-- | include/clblast_cuda.h | 8 | ||||
-rwxr-xr-x | scripts/generator/generator.py | 10 | ||||
-rw-r--r-- | scripts/generator/generator/routine.py | 15 | ||||
-rw-r--r-- | src/clblast.cpp | 33 | ||||
-rw-r--r-- | src/clblast_c.cpp | 47 | ||||
-rw-r--r-- | src/clblast_cuda.cpp | 35 | ||||
-rw-r--r-- | src/kernels/levelx/xconvgemm_part1.opencl | 113 | ||||
-rw-r--r-- | src/kernels/levelx/xconvgemm_part2.opencl | 281 | ||||
-rw-r--r-- | src/routines/levelx/xconvgemm.cpp | 180 | ||||
-rw-r--r-- | src/routines/levelx/xconvgemm.hpp | 53 | ||||
-rw-r--r-- | src/routines/routines.hpp | 1 | ||||
-rw-r--r-- | src/utilities/utilities.hpp | 4 | ||||
-rw-r--r-- | test/correctness/routines/levelx/xconvgemm.cpp | 24 | ||||
-rw-r--r-- | test/correctness/testblas.hpp | 20 | ||||
-rw-r--r-- | test/correctness/tester.cpp | 1 | ||||
-rw-r--r-- | test/performance/client.cpp | 4 | ||||
-rw-r--r-- | test/performance/routines/levelx/xconvgemm.cpp | 31 | ||||
-rw-r--r-- | test/routines/levelx/xconvgemm.hpp | 243 |
24 files changed, 1183 insertions, 20 deletions
@@ -7,6 +7,8 @@ Development (next version) - Fixed an issue with AMD GPUs and the new GEMMK == 1 kernel - Fixed an issue with the preprocessor and the new GEMMK == 1 kernel - Various minor fixes and enhancements +- Added non-BLAS routines: + * SCONVGEMM/DCONVGEMM/HCONVGEMM (convolution as im2col followed by batched GEMM) Version 1.4.1 - Fixed an access violation under Windows upon releasing the OpenCL program when the driver is already unloaded diff --git a/CMakeLists.txt b/CMakeLists.txt index fd201021..0f067efb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -221,7 +221,7 @@ set(LEVEL1_ROUTINES xswap xscal xcopy xaxpy xdot xdotu xdotc xnrm2 xasum xamax) set(LEVEL2_ROUTINES xgemv xgbmv xhemv xhbmv xhpmv xsymv xsbmv xspmv xtrmv xtbmv xtpmv xtrsv xger xgeru xgerc xher xhpr xher2 xhpr2 xsyr xspr xsyr2 xspr2) set(LEVEL3_ROUTINES xgemm xsymm xhemm xsyrk xherk xsyr2k xher2k xtrmm xtrsm) -set(LEVELX_ROUTINES xhad xomatcopy xim2col xaxpybatched xgemmbatched xgemmstridedbatched) +set(LEVELX_ROUTINES xhad xomatcopy xim2col xconvgemm xaxpybatched xgemmbatched xgemmstridedbatched) set(ROUTINES ${LEVEL1_ROUTINES} ${LEVEL2_ROUTINES} ${LEVEL3_ROUTINES} ${LEVELX_ROUTINES}) set(PRECISIONS 32 64 3232 6464 16) @@ -3063,10 +3063,70 @@ Arguments to IM2COL: * `const size_t stride_w`: Integer size argument. This value must be positive. * `const size_t dilation_h`: Integer size argument. This value must be positive. * `const size_t dilation_w`: Integer size argument. This value must be positive. -* `const cl_mem im_buffer`: OpenCL buffer to store the input im vector. -* `const size_t im_offset`: The offset in elements from the start of the input im vector. -* `cl_mem col_buffer`: OpenCL buffer to store the output col vector. -* `const size_t col_offset`: The offset in elements from the start of the output col vector. +* `const cl_mem im_buffer`: OpenCL buffer to store the input im tensor. +* `const size_t im_offset`: The offset in elements from the start of the input im tensor. 
+* `cl_mem col_buffer`: OpenCL buffer to store the output col tensor. +* `const size_t col_offset`: The offset in elements from the start of the output col tensor. +* `cl_command_queue* queue`: Pointer to an OpenCL command queue associated with a context and device to execute the routine on. +* `cl_event* event`: Pointer to an OpenCL event to be able to wait for completion of the routine's OpenCL kernel(s). This is an optional argument. + + + +xCONVGEMM: Batched convolution as GEMM (non-BLAS function) +------------- + +Integrates im2col and GEMM for batched 3D convolution, in which _im_ is the 4D input tensor (NCHW - batch-channelin-height-width), _kernel_ the 4D kernel weights tensor (KCHW - channelout-channelin-height-width), and _result_ the 4D output tensor (NCHW - batch-channelout-height-width). + +C++ API: +``` +template <typename T> +StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event) +``` + +C API: +``` +CLBlastStatusCode CLBlastSconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event) +CLBlastStatusCode CLBlastDconvgemm(const size_t channels, const 
size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event) +CLBlastStatusCode CLBlastHconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event) +``` + +Arguments to CONVGEMM: + +* `const size_t channels`: Integer size argument. This value must be positive. +* `const size_t height`: Integer size argument. This value must be positive. +* `const size_t width`: Integer size argument. This value must be positive. +* `const size_t kernel_h`: Integer size argument. This value must be positive. +* `const size_t kernel_w`: Integer size argument. This value must be positive. +* `const size_t pad_h`: Integer size argument. This value must be positive. +* `const size_t pad_w`: Integer size argument. This value must be positive. +* `const size_t stride_h`: Integer size argument. This value must be positive. +* `const size_t stride_w`: Integer size argument. This value must be positive. +* `const size_t dilation_h`: Integer size argument. This value must be positive. +* `const size_t dilation_w`: Integer size argument. This value must be positive. +* `const size_t num_kernels`: Integer size argument. This value must be positive. 
+* `const size_t batch_count`: Integer size argument. This value must be positive. +* `const cl_mem im_buffer`: OpenCL buffer to store the input im tensor. +* `const size_t im_offset`: The offset in elements from the start of the input im tensor. +* `const cl_mem kernel_buffer`: OpenCL buffer to store the input kernel tensor. +* `const size_t kernel_offset`: The offset in elements from the start of the input kernel tensor. +* `cl_mem result_buffer`: OpenCL buffer to store the output result tensor. +* `const size_t result_offset`: The offset in elements from the start of the output result tensor. * `cl_command_queue* queue`: Pointer to an OpenCL command queue associated with a context and device to execute the routine on. * `cl_event* event`: Pointer to an OpenCL event to be able to wait for completion of the routine's OpenCL kernel(s). This is an optional argument. diff --git a/doc/routines.md b/doc/routines.md index c5e14907..7c6a1eb9 100644 --- a/doc/routines.md +++ b/doc/routines.md @@ -93,8 +93,9 @@ In addition, some extra non-BLAS routines are also supported by CLBlast, classif | xHAD | ✔ | ✔ | ✔ | ✔ | ✔ | (Hadamard product) | xOMATCOPY | ✔ | ✔ | ✔ | ✔ | ✔ | (Out-of-place copying/transposing/scaling of matrices) | xIM2COL | ✔ | ✔ | ✔ | ✔ | ✔ | (Image to column transform as used to express convolution as GEMM) +| xCONVGEMM | ✔ | ✔ | - | - | ✔ | (Experimental, implemented as im2col followed by batched GEMM) -Some less commonly used BLAS routines are not yet supported yet by CLBlast. They are xROTG, xROTMG, xROT, xROTM, xTBSV, and xTPSV. +Some less commonly used BLAS routines are not yet supported by CLBlast. They are xROTG, xROTMG, xROT, xROTM, xTBSV, and xTPSV. 
Half precision (fp16) diff --git a/include/clblast.h b/include/clblast.h index ce64b37a..9a8988e7 100644 --- a/include/clblast.h +++ b/include/clblast.h @@ -636,6 +636,14 @@ StatusCode Im2col(const size_t channels, const size_t height, const size_t width cl_mem col_buffer, const size_t col_offset, cl_command_queue* queue, cl_event* event = nullptr); +// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/HCONVGEMM +template <typename T> +StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event = nullptr); + // Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED template <typename T> StatusCode AxpyBatched(const size_t n, diff --git a/include/clblast_c.h b/include/clblast_c.h index 23a3afcc..2357182c 100644 --- a/include/clblast_c.h +++ b/include/clblast_c.h @@ -1410,6 +1410,23 @@ CLBlastStatusCode PUBLIC_API CLBlastHim2col(const size_t channels, const size_t cl_mem col_buffer, const size_t col_offset, cl_command_queue* queue, cl_event* event); +// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/HCONVGEMM +CLBlastStatusCode PUBLIC_API CLBlastSconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t 
kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event); +CLBlastStatusCode PUBLIC_API CLBlastDconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event); +CLBlastStatusCode PUBLIC_API CLBlastHconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event); + // Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED CLBlastStatusCode PUBLIC_API CLBlastSaxpyBatched(const size_t n, const float *alphas, diff --git a/include/clblast_cuda.h b/include/clblast_cuda.h index d82ee331..1bbd898e 100644 --- a/include/clblast_cuda.h +++ b/include/clblast_cuda.h @@ -608,6 +608,14 @@ StatusCode Im2col(const size_t channels, const size_t height, const size_t width CUdeviceptr col_buffer, const size_t col_offset, const CUcontext context, const CUdevice device); +// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/HCONVGEMM +template <typename T> +StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t 
pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const CUdeviceptr im_buffer, const size_t im_offset, + const CUdeviceptr kernel_buffer, const size_t kernel_offset, + CUdeviceptr result_buffer, const size_t result_offset, + const CUcontext context, const CUdevice device); + // Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED template <typename T> StatusCode AxpyBatched(const size_t n, diff --git a/scripts/generator/generator.py b/scripts/generator/generator.py index 25a04273..c2637037 100755 --- a/scripts/generator/generator.py +++ b/scripts/generator/generator.py @@ -106,11 +106,16 @@ ammn = size_helper("layout == CLBlastLayoutRowMajor", "m", "((side == CLBlastSid bmnn = size_helper("layout == CLBlastLayoutRowMajor", "((side == CLBlastSideLeft) ? m : n)", "n", "b_ld") im = "height * width * channels" col = "height * width * channels" +imb = "height * width * channels * batch_count" +kernel = "kernel_h * kernel_w * num_kernels" +result = "height_out * width_out * num_kernels * batch_count" + # ================================================================================================== # Populates a list of routines im2col_constants = ["channels", "height", "width", "kernel_h", "kernel_w", "pad_h", "pad_w", "stride_h", "stride_w", "dilation_h", "dilation_w"] +convgemm_constants = im2col_constants + ["num_kernels", "batch_count"] ROUTINES = [ [ # Level 1: vector-vector Routine(False, True, 0, False, "1", "rotg", T, [S,D], [], [], [], ["sa","sb","sc","ss"], ["1","1","1","1"], [], "", "Generate givens plane rotation", "", []), @@ -176,6 +181,7 @@ ROUTINES = [ Routine(True, True, 0, False, "x", "had", T, [S,D,C,Z,H], ["n"], [], ["x","y"], ["z"], [xn,yn,zn], ["alpha","beta"], "", "Element-wise vector product (Hadamard)", "Performs the Hadamard element-wise product _z = alpha * x * y + beta * z_, in 
which _x_, _y_, and _z_ are vectors and _alpha_ and _beta_ are scalar constants.", []), Routine(True, True, 0, False, "x", "omatcopy", T, [S,D,C,Z,H], ["m","n"], ["layout","a_transpose"], ["a"], ["b"], [amn,bnma], ["alpha"], "", "Scaling and out-place transpose/copy (non-BLAS function)", "Performs scaling and out-of-place transposition/copying of matrices according to _B = alpha*op(A)_, in which _A_ is an input matrix (_m_ rows by _n_ columns), _B_ an output matrix, and _alpha_ a scalar value. The operation _op_ can be a normal matrix copy, a transposition or a conjugate transposition.", [ald_m, bld_n]), Routine(True, True, 0, False, "x", "im2col", T, [S,D,C,Z,H], im2col_constants, [], ["im"], ["col"], [im,col], [""], "", "Im2col function (non-BLAS function)", "Performs the im2col algorithm, in which _im_ is the input matrix and _col_ is the output matrix.", []), + Routine(True, True, 0, False, "x", "convgemm", T, [S,D,H], convgemm_constants, [], ["im","kernel"], ["result"], [imb,kernel,result],[""], "", "Batched convolution as GEMM (non-BLAS function)", "Integrates im2col and GEMM for batched 3D convolution, in which _im_ is the 4D input tensor (NCHW - batch-channelin-height-width), _kernel_ the 4D kernel weights tensor (KCHW - channelout-channelin-height-width), and _result_ the 4D output tensor (NCHW - batch-channelout-height-width).", []), # Batched routines: Routine(True, True, 1, False, "x", "axpy", T, [S,D,C,Z,H], ["n"], [], ["x"], ["y"], [xn,yn], ["alpha"], "", "Batched version of AXPY", "As AXPY, but multiple operations are batched together for better performance.", []), Routine(True, True, 1, False, "x", "gemm", T, [S,D,C,Z,H], ["m","n","k"], ["layout","a_transpose","b_transpose"], ["a","b"], ["c"], [amk,bkn,cmn], ["alpha","beta"], "", "Batched version of GEMM", "As GEMM, but multiple operations are batched together for better performance.", [ald_transa_m_k, bld_transb_k_n, cld_m]), @@ -230,10 +236,10 @@ def main(argv): if i == 6: body += 
cpp.wrapper_cublas(routine) if i == 7: - if routine.batched == 0: + if routine.batched == 0 and routine.name not in ["convgemm"]: body += cpp.clblast_netlib_c_h(routine) if i == 8: - if routine.batched == 0: + if routine.batched == 0 and routine.name not in ["convgemm"]: body += cpp.clblast_netlib_c_cc(routine) if i == 9: body += cpp.clblast_h(routine, cuda=True) diff --git a/scripts/generator/generator/routine.py b/scripts/generator/generator/routine.py index 317c8e7b..7321349d 100644 --- a/scripts/generator/generator/routine.py +++ b/scripts/generator/generator/routine.py @@ -142,6 +142,11 @@ class Routine: return ["a", "b", "c", "ap"] @staticmethod + def buffers_tensor(): + """Distinguish between vectors and matrices and tensors""" + return ["im", "col", "kernel", "result"] + + @staticmethod def routines_scalar_no_return(): return ["dotu", "dotc"] @@ -187,7 +192,7 @@ class Routine: def buffers_without_ld_inc(self): """List of buffers without 'inc' or 'ld'""" - return self.scalar_buffers_first() + self.scalar_buffers_second() + ["ap", "im", "col"] + return self.scalar_buffers_first() + self.scalar_buffers_second() + ["ap", "im", "col", "kernel", "result"] def get_buffer_type(self, name, flavour): if name in self.index_buffers(): @@ -200,7 +205,7 @@ class Routine: def no_scalars(self): """Determines whether or not this routine has scalar arguments (alpha/beta)""" - return self.scalars == [] or self.name == "im2col" + return self.scalars == [] or self.name in ["im2col", "convgemm"] def has_layout(self): """Determines whether the layout is an argument""" @@ -221,12 +226,12 @@ class Routine: """Determines which buffers go first (between alpha and beta) and which ones go after""" if self.level == "2b" or self.name == "had": return ["x", "y"] - return ["ap", "a", "b", "x", "im"] + return ["ap", "a", "b", "x", "im", "kernel"] def buffers_second(self): if self.level == "2b" or self.name == "had": return ["z", "ap", "a", "b", "c"] - return ["y", "c", "col"] + return ["y", 
"c", "col", "result"] def buffer(self, name): """Retrieves a variable name for a specific input/output vector/matrix (e.g. 'x')""" @@ -397,7 +402,7 @@ class Routine: prefix = "const " if (name in self.inputs) else "" inout = "input" if (name in self.inputs) else "output" if (name in self.inputs) or (name in self.outputs): - math_name = name.upper() + " matrix" if (name in self.buffers_matrix()) else name + " vector" + math_name = name.upper() + " matrix" if (name in self.buffers_matrix()) else name + " tensor" if (name in self.buffers_tensor()) else name + " vector" inc_ld_description = "Leading dimension " if (name in self.buffers_matrix()) else "Stride/increment " a = ["`" + prefix + "cl_mem " + name + "_buffer`: OpenCL buffer to store the " + inout + " " + math_name + "."] b = ["`const size_t " + self.b_star() + name + "_offset" + self.b_s() + "`: The offset" + self.b_s() + " in elements from the start of the " + inout + " " + math_name + "."] diff --git a/src/clblast.cpp b/src/clblast.cpp index 10bb8cba..0cd2f843 100644 --- a/src/clblast.cpp +++ b/src/clblast.cpp @@ -2252,6 +2252,39 @@ template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const si cl_mem, const size_t, cl_command_queue*, cl_event*); +// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/HCONVGEMM +template <typename T> +StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event) { + try { + auto queue_cpp = Queue(*queue); + auto routine = Xconvgemm<T>(queue_cpp, event); + routine.DoConvgemm(channels, 
height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count, + Buffer<T>(im_buffer), im_offset, + Buffer<T>(kernel_buffer), kernel_offset, + Buffer<T>(result_buffer), result_offset); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Convgemm<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const cl_mem, const size_t, + const cl_mem, const size_t, + cl_mem, const size_t, + cl_command_queue*, cl_event*); +template StatusCode PUBLIC_API Convgemm<double>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const cl_mem, const size_t, + const cl_mem, const size_t, + cl_mem, const size_t, + cl_command_queue*, cl_event*); +template StatusCode PUBLIC_API Convgemm<half>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const cl_mem, const size_t, + const cl_mem, const size_t, + cl_mem, const size_t, + cl_command_queue*, cl_event*); + // Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED template <typename T> StatusCode AxpyBatched(const size_t n, diff --git a/src/clblast_c.cpp b/src/clblast_c.cpp index 06a5fc67..72adb888 100644 --- a/src/clblast_c.cpp +++ b/src/clblast_c.cpp @@ -3679,6 +3679,53 @@ CLBlastStatusCode CLBlastHim2col(const size_t channels, const size_t height, con } catch (...) 
{ return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); } } +// CONVGEMM +CLBlastStatusCode CLBlastSconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event) { + try { + return static_cast<CLBlastStatusCode>( + clblast::Convgemm<float>(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count, + im_buffer, im_offset, + kernel_buffer, kernel_offset, + result_buffer, result_offset, + queue, event) + ); + } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); } +} +CLBlastStatusCode CLBlastDconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event) { + try { + return static_cast<CLBlastStatusCode>( + clblast::Convgemm<double>(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count, + im_buffer, im_offset, + kernel_buffer, kernel_offset, + result_buffer, result_offset, + queue, event) + ); + } catch (...) 
{ return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); } +} +CLBlastStatusCode CLBlastHconvgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const cl_mem im_buffer, const size_t im_offset, + const cl_mem kernel_buffer, const size_t kernel_offset, + cl_mem result_buffer, const size_t result_offset, + cl_command_queue* queue, cl_event* event) { + try { + return static_cast<CLBlastStatusCode>( + clblast::Convgemm<half>(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count, + im_buffer, im_offset, + kernel_buffer, kernel_offset, + result_buffer, result_offset, + queue, event) + ); + } catch (...) { return static_cast<CLBlastStatusCode>(clblast::DispatchExceptionForC()); } +} + // AXPY CLBlastStatusCode CLBlastSaxpyBatched(const size_t n, const float *alphas, diff --git a/src/clblast_cuda.cpp b/src/clblast_cuda.cpp index 8927014b..f14806cb 100644 --- a/src/clblast_cuda.cpp +++ b/src/clblast_cuda.cpp @@ -2350,6 +2350,41 @@ template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const si CUdeviceptr, const size_t, const CUcontext, const CUdevice); +// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/HCONVGEMM +template <typename T> +StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count, + const CUdeviceptr im_buffer, const size_t im_offset, + const CUdeviceptr kernel_buffer, const size_t kernel_offset, + CUdeviceptr result_buffer, const 
size_t result_offset, + const CUcontext context, const CUdevice device) { + try { + const auto context_cpp = Context(context); + const auto device_cpp = Device(device); + auto queue_cpp = Queue(context_cpp, device_cpp); + auto routine = Xconvgemm<T>(queue_cpp, nullptr); + routine.DoConvgemm(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count, + Buffer<T>(im_buffer), im_offset, + Buffer<T>(kernel_buffer), kernel_offset, + Buffer<T>(result_buffer), result_offset); + return StatusCode::kSuccess; + } catch (...) { return DispatchException(); } +} +template StatusCode PUBLIC_API Convgemm<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + const CUcontext, const CUdevice); +template StatusCode PUBLIC_API Convgemm<double>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + const CUcontext, const CUdevice); +template StatusCode PUBLIC_API Convgemm<half>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, + const CUdeviceptr, const size_t, + const CUdeviceptr, const size_t, + CUdeviceptr, const size_t, + const CUcontext, const CUdevice); + // Batched version of AXPY: SAXPYBATCHED/DAXPYBATCHED/CAXPYBATCHED/ZAXPYBATCHED/HAXPYBATCHED template <typename T> StatusCode AxpyBatched(const size_t n, diff --git a/src/kernels/levelx/xconvgemm_part1.opencl b/src/kernels/levelx/xconvgemm_part1.opencl new file mode 100644 
index 00000000..abdb5324 --- /dev/null +++ b/src/kernels/levelx/xconvgemm_part1.opencl @@ -0,0 +1,113 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren <www.cedricnugteren.nl> +// +// This file contains an implementation of 3D convolution on a 4D image using GEMM kernels. It +// uses parameters from the direct GEMM kernel. This is the part with the loads from memory (1/2). +// This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running +// the im2col kernel to create a 'col' temporary matrix. +// TODO: Currently only works with 'CONVGEMM_WITH_IM2COL' set +// +// ================================================================================================= + +// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string +// literal). Comment-out this line for syntax-highlighting when developing. +R"( + +// ================================================================================================= +#if defined(ROUTINE_CONVGEMM) && !defined(CONVGEMM_WITH_IM2COL) + +// Loads global off-chip memory into thread-private register files. This function is specific for +// loading the image input tensor. This includes a bounds check. 
+INLINE_FUNC real GlobalToPrivateCheckedImage(const __global real* restrict imagegm, const int image_offset_batch, + const int h_id, const int w_id, const int kwg, + const int input_h, const int input_w, const int channels, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w) { + + // Im2col indices + const int kernel_2d_index = kwg % (kernel_h * kernel_w); + const int kw_id = kernel_2d_index % kernel_w; + const int kh_id = kernel_2d_index / kernel_w; + const int c_id = kwg / (kernel_h * kernel_w); + const int h_index = -pad_h + kh_id * dilation_h + stride_h * h_id; + const int w_index = -pad_w + kw_id * dilation_w + stride_w * w_id; + + // With bounds check + real result; + if (h_index >= 0 && h_index < input_h && + w_index >= 0 && w_index < input_w) { + const int image_index = w_index + input_w * (h_index + input_h * c_id); + result = imagegm[image_index + image_offset_batch]; + } + else { + SetToZero(result); + } + return result; +} + +// Loads global off-chip memory into local (shared) memory on-chip. This function is specific for +// loading the image input tensor. This includes a bounds check. 
+INLINE_FUNC real GlobalToLocalCheckedImage(const __global realMD* restrict imagegm, LOCAL_PTR real* alm, + const int image_offset_batch, + const int h_id, const int w_id, const int kwg, + const int input_h, const int input_w, const int channels, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w) { + #if MDIMCD == MDIMAD + const int la0 = get_local_id(0); + const int la1 = get_local_id(1); + #else + const int tid = get_local_id(0) + MDIMCD*get_local_id(1); + const int la0 = tid % MDIMAD; + const int la1 = tid / MDIMAD; + #endif + #pragma unroll + for (int _mia = 0; _mia < MWAD; _mia += 1) { + #pragma unroll + for (int _kia = 0; _kia < KWAD; _kia += 1) { + + // Computes the indices for the global memory + int mg = _mia + la0*MWAD; + int kg = _kia + la1*KWAD; + int idm = mg + GetGroupID0()*WGD; + int idk = kg + kwg; + + // Im2col indices + const int kernel_2d_index = idk % (kernel_h * kernel_w); + const int kw_id = kernel_2d_index % kernel_w; + const int kh_id = kernel_2d_index / kernel_w; + const int c_id = idk / (kernel_h * kernel_w); + const int h_index = -pad_h + kh_id * dilation_h + stride_h * h_id; + const int w_index = -pad_w + kw_id * dilation_w + stride_w * w_id; + + // Loads the data from global memory into the local memory + if (h_index >= 0 && h_index < input_h && + w_index >= 0 && w_index < input_w) { + const int image_index = w_index + input_w * (h_index + input_h * c_id); + const real result = imagegm[image_index + image_offset_batch]; + alm[kg*(WGD + PADA) + mg] = result; + } + else { + SetToZero(alm[kg*(WGD + PADA) + mg]); + } + } + } +} + +#endif +// ================================================================================================= + +// End of the C++11 raw string literal +)" + +// ================================================================================================= diff --git 
a/src/kernels/levelx/xconvgemm_part2.opencl b/src/kernels/levelx/xconvgemm_part2.opencl new file mode 100644 index 00000000..e0ac24a0 --- /dev/null +++ b/src/kernels/levelx/xconvgemm_part2.opencl @@ -0,0 +1,281 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren <www.cedricnugteren.nl> +// +// This file contains an implementation of 3D convolution on a 4D image using GEMM kernels. It +// uses parameters from the direct GEMM kernel. This part contains the main kernel (2/2). +// This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running +// the im2col kernel to create a 'col' temporary matrix. +// TODO: Currently only works with 'CONVGEMM_WITH_IM2COL' set +// +// ================================================================================================= + +// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string +// literal). Comment-out this line for syntax-highlighting when developing. 
+R"( + +// ================================================================================================= +#if defined(ROUTINE_CONVGEMM) + +// ConvGEMM kernel +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void Xconvgemm(const int num_patches, const int num_kernels, const int patch_size, + const __global realND* restrict kernelgm, const int kernel_offset, + __global real* resultgm, const int result_offset, const int result_stride, +#if defined(CONVGEMM_WITH_IM2COL) + const __global realMD* restrict colgm, const int col_offset, const int col_stride) +#else + const __global realMD* restrict imagegm, const int image_offset, + const int input_h, const int input_w, const int channels, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int output_h, const int output_w) +#endif +{ + + // Batch offsets + const int batch = get_group_id(2); + #if defined(CONVGEMM_WITH_IM2COL) + const int col_offset_batch = col_offset + col_stride * batch; + #else + const int image_offset_batch = image_offset + channels * input_h * input_w * batch; + #endif + const int result_offset_batch = result_offset + result_stride * batch; + + __local real alm[WGD * (WGD + PADA)]; + __local real blm[WGD * (WGD + PADB)]; + + // Extra pointers to scalar versions of global memory + #if defined(CONVGEMM_WITH_IM2COL) + const __global real* restrict colgms = (const __global real* restrict) colgm; + #endif + const __global real* restrict kernelgms = (const __global real* restrict) kernelgm; + + // Allocates workitem-private memory (registers) + #pragma promote_to_registers + real apd[MWID]; + #pragma promote_to_registers + real bpd[NWID]; + #pragma promote_to_registers + real cpd[NWID * MWID]; + + // Initializes the accumulation registers + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + 
SetToZero(cpd[_ni * MWID + _mi]); + } + } + + // Global m/n indices + const int idm = get_local_id(0) * MWID + GetGroupID0() * WGD; + const int idn = get_local_id(1) * NWID + GetGroupID1() * WGD; + #if !defined(CONVGEMM_WITH_IM2COL) + const int w_id = idm % output_w; + const int h_id = idm / output_w; + #endif + + // The faster version of GEMM is not allowed on the (incomplete) borders. Therefore, this section + // processes only the main parts: output blocks of WGD by WGD. + if ((idm < (num_patches/WGD)*WGD) && (idn < (num_kernels/WGD)*WGD)) { + + // Loops over all complete workgroup tiles (K-dimension) + int kwg = 0; + for (; kwg < (patch_size/WGD) * WGD; kwg += WGD) { + + // Loads data: off-chip --> local (matrix A and B) + #if defined(CONVGEMM_WITH_IM2COL) + if (num_patches % VWMD == 0 && col_offset_batch % VWMD == 0) { + GlobalToLocalDirectA(colgm, alm, num_patches, col_offset_batch, kwg, false, false); + } + else { + GlobalToLocalScalarA(colgms, alm, num_patches, col_offset_batch, kwg, false, false); + } + #else + GlobalToLocalCheckedImage(imagegm, alm, image_offset_batch, h_id, w_id, kwg, + input_h, input_w, channels, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w); + #endif + if (patch_size % VWND == 0 && kernel_offset % VWND == 0) { + GlobalToLocalDirectB(kernelgm, blm, patch_size, kernel_offset, kwg, true, false); + } + else { + GlobalToLocalScalarB(kernelgms, blm, patch_size, kernel_offset, kwg, true, false); + } + barrier(CLK_LOCAL_MEM_FENCE); + + // Loops over all workitem tiles, unrolled by a factor KWID + for (int pwi = 0; pwi < WGD; pwi += KWID) { + #pragma unroll + for (int _pit = 0; _pit < KWID; _pit += 1) { + int kg = pwi + _pit; + + // Loads data: local --> private (matrix A and B) + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + apd[_mi] = LocalToPrivateDirectA(alm, _mi, kg, false); + } + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + bpd[_ni] = LocalToPrivateDirectB(blm, _ni, kg, 
true); + } + + // Performs the accumulation (Cpmd += Apmd * Bpmd) + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + MultiplyAdd(cpd[_ni * MWID + _mi], apd[_mi], bpd[_ni]); + } + } + } + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + // Loop over the remaining part (incomplete tile in K-dimension) + for (; kwg < patch_size; ++kwg) { + + // Loads data: off-chip --> private (matrix A and B) + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + #if defined(CONVGEMM_WITH_IM2COL) + apd[_mi] = GlobalToPrivateDirectA(colgms, _mi, num_patches, col_offset_batch, idm, kwg, false, false); + #else + apd[_mi] = GlobalToPrivateCheckedImage(imagegm, image_offset_batch, h_id, w_id, kwg, + input_h, input_w, channels, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w); + #endif + } + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + bpd[_ni] = GlobalToPrivateDirectB(kernelgms, _ni, patch_size, kernel_offset, idn, kwg, true, false); + } + + // Performs the accumulation (Cpmd += Apmd * Bpmd) + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + MultiplyAdd(cpd[_ni * MWID + _mi], apd[_mi], bpd[_ni]); + } + } + } + + // Stores a tile of results + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + StoreResultsDirect(resultgm, cpd[_ni * MWID + _mi], _mi, _ni, idm, idn, + ONE, ZERO, num_patches, result_offset_batch, false); + } + } + } + + // Simple but slower version for the parts on the edge (incomplete tiles in M and N-dimensions) + else { + // Loops over all complete workgroup tiles (K-dimension) + int kwg = 0; + for (; kwg < (patch_size/WGD) * WGD; kwg+=WGD) { + + // Loads data: off-chip --> local + #if defined(CONVGEMM_WITH_IM2COL) + GlobalToLocalCheckedA(colgms, alm, num_patches, col_offset_batch, kwg, false, false, 
num_patches, patch_size); + #else + GlobalToLocalCheckedImage(imagegm, alm, image_offset_batch, h_id, w_id, kwg, + input_h, input_w, channels, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w); + #endif + GlobalToLocalCheckedB(kernelgms, blm, patch_size, kernel_offset, kwg, true, false, num_kernels, patch_size); + barrier(CLK_LOCAL_MEM_FENCE); + + // Loops over all workitem tiles, unrolled by a factor KWID + for (int pwi = 0; pwi < WGD; pwi += KWID) { + #pragma unroll + for (int _pit = 0; _pit < KWID; _pit += 1) { + int kg = pwi + _pit; + + // Loads data: local --> private + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + apd[_mi] = LocalToPrivateDirectA(alm, _mi, kg, false); + } + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + bpd[_ni] = LocalToPrivateDirectB(blm, _ni, kg, true); + } + + // Performs the accumulation (C += A * B) + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + MultiplyAdd(cpd[_ni * MWID + _mi], apd[_mi], bpd[_ni]); + } + } + } + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + // Loop over the remaining part (incomplete tile in K-dimension) + for (; kwg < patch_size; ++kwg) { + + // Loads data: off-chip --> private + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + #if defined(CONVGEMM_WITH_IM2COL) + apd[_mi] = GlobalToPrivateCheckedA(colgms, _mi, num_patches, col_offset_batch, idm, kwg, false, false, num_patches); + #else + apd[_mi] = GlobalToPrivateCheckedImage(imagegm, image_offset_batch, h_id, w_id, kwg, + input_h, input_w, channels, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w); + #endif + } + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + bpd[_ni] = GlobalToPrivateCheckedB(kernelgms, _ni, patch_size, kernel_offset, idn, kwg, true, false, num_kernels); + } + + // Performs the accumulation (C += A * B) + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 
1) { + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + MultiplyAdd(cpd[_ni * MWID + _mi], apd[_mi], bpd[_ni]); + } + } + } + + // Stores a tile of results + #pragma unroll + for (int _ni = 0; _ni < NWID; _ni += 1) { + #pragma unroll + for (int _mi = 0; _mi < MWID; _mi += 1) { + StoreResultsChecked(resultgm, cpd[_ni * MWID + _mi], _mi, _ni, idm, idn, num_patches, num_kernels, + ONE, ZERO, num_patches, result_offset_batch, false); + } + } + } +} + +#endif +// ================================================================================================= + +// End of the C++11 raw string literal +)" + +// ================================================================================================= diff --git a/src/routines/levelx/xconvgemm.cpp b/src/routines/levelx/xconvgemm.cpp new file mode 100644 index 00000000..f26f23a7 --- /dev/null +++ b/src/routines/levelx/xconvgemm.cpp @@ -0,0 +1,180 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren <www.cedricnugteren.nl> +// +// This file implements the Xconvgemm class (see the header for information about the class). 
+// +// ================================================================================================= + +#include <string> +#include <vector> +#include <assert.h> + +#include "routines/levelx/xconvgemm.hpp" +#include "routines/levelx/xim2col.hpp" + +namespace clblast { +// ================================================================================================= + +// Constructor: forwards to base class constructor +template <typename T> +Xconvgemm<T>::Xconvgemm(Queue &queue, EventPointer event, const std::string &name, + const ConvGemmMethod method): + Routine(queue, event, name, {"XgemmDirect"}, + PrecisionValue<T>(), {}, { + (method == ConvGemmMethod::kWithIm2Col) ? "#define CONVGEMM_WITH_IM2COL\n" : "", + #include "../../kernels/level3/level3.opencl" + , // separated in multiple parts to prevent C1091 in MSVC 2013 + #include "../../kernels/level3/xgemm_direct_part1.opencl" + #include "../../kernels/level3/xgemm_direct_part2.opencl" + #include "../../kernels/level3/xgemm_direct_part3.opencl" + , // separated in multiple parts to prevent C1091 in MSVC 2013 + #include "../../kernels/levelx/xconvgemm_part1.opencl" + #include "../../kernels/levelx/xconvgemm_part2.opencl" + }), + method_(method) { +} + +// ================================================================================================= + +template <typename T> +void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const size_t width, + const size_t kernel_h, const size_t kernel_w, const size_t pad_h, + const size_t pad_w, const size_t stride_h, const size_t stride_w, + const size_t dilation_h, const size_t dilation_w, + const size_t num_kernels, const size_t batch_count, + const Buffer<T> &im_buffer, const size_t im_offset, + const Buffer<T> &kernel_buffer, const size_t kernel_offset, + const Buffer<T> &result_buffer, const size_t result_offset) { + + // TODO: Implement single-kernel approach + assert(method_ == ConvGemmMethod::kWithIm2Col); + + // Tests for a valid 
batch count + if (batch_count == 0) { + throw BLASError(StatusCode::kInvalidBatchCount); + } + + // Makes sure all dimensions are larger than zero + if ((channels == 0) || (height == 0) || (width == 0) || (num_kernels == 0)) { + throw BLASError(StatusCode::kInvalidDimension); + } + + // Sets the output height and width + const auto size_h = height + 2 * pad_h; + const auto padding_h = dilation_h * (kernel_h - 1) + 1; + const auto output_h = (size_h >= padding_h) ? (size_h - padding_h) / stride_h + 1 : 1; + const auto size_w = width + 2 * pad_w; + const auto padding_w = dilation_w * (kernel_w - 1) + 1; + const auto output_w = (size_w >= padding_w) ? (size_w - padding_w) / stride_w + 1 : 1; + + // Sets other useful variables + const auto patch_size = kernel_h * kernel_w * channels; + const auto num_patches = output_h * output_w; + + // Possible approach: im2col + GEMM + // result = GEMM(im2col(image), kernel) + auto col_buffer = Buffer<T>(context_, 0); // nullptr, will be optionally created later + if (method_ == ConvGemmMethod::kWithIm2Col) { + + // Temporary col matrix + const auto col_size = (method_ == ConvGemmMethod::kWithIm2Col) ? 
patch_size * num_patches * batch_count : 1; + col_buffer = Buffer<T>(context_, col_size); + + // Loops over each batch + for (auto batch_id = size_t{0}; batch_id < batch_count; ++batch_id) { + + // im2col + const auto im_batch_offset = batch_id * channels * height * width + im_offset; + const auto col_batch_offset = batch_id * patch_size * num_patches; + auto im2col_event = Event(); + auto im2col = Xim2col<T>(queue_, im2col_event.pointer()); + im2col.DoIm2col(channels, height, width, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, + im_buffer, im_batch_offset, + col_buffer, col_batch_offset); + im2col_event.WaitForCompletion(); + } + } + + // Strided batched GEMM: C (result) = alpha (1) * A (col) * B (kernel) + beta (0) * C (result) + const auto col_stride = patch_size * num_patches; + const auto result_stride = num_kernels * output_h * output_w; + + // Tests the matrices for validity + TestMatrixB(patch_size, num_kernels, kernel_buffer, kernel_offset, patch_size); + for (auto batch = size_t{0}; batch < batch_count; ++batch) { + if (method_ == ConvGemmMethod::kWithIm2Col) { + TestMatrixA(num_patches, patch_size, col_buffer, col_stride * batch, num_patches); + } + else { + // TODO: check for valid image tensor + } + TestMatrixC(num_patches, num_kernels, result_buffer, result_offset + result_stride * batch, num_patches); + } + + // Retrieves the proper XgemmDirect kernel from the compiled binary + auto kernel = Kernel(program_, "Xconvgemm"); + + // Sets the kernel arguments + kernel.SetArgument(0, static_cast<int>(num_patches)); + kernel.SetArgument(1, static_cast<int>(num_kernels)); + kernel.SetArgument(2, static_cast<int>(patch_size)); + kernel.SetArgument(3, kernel_buffer()); + kernel.SetArgument(4, static_cast<int>(kernel_offset)); + kernel.SetArgument(5, result_buffer()); + kernel.SetArgument(6, static_cast<int>(result_offset)); + kernel.SetArgument(7, static_cast<int>(result_stride)); + if (method_ == ConvGemmMethod::kWithIm2Col) 
{ + kernel.SetArgument(8, col_buffer()); + kernel.SetArgument(9, static_cast<int>(0)); + kernel.SetArgument(10, static_cast<int>(col_stride)); + } + if (method_ == ConvGemmMethod::kSingleKernel) { + kernel.SetArgument(8, im_buffer()); + kernel.SetArgument(9, static_cast<int>(im_offset)); + kernel.SetArgument(10, static_cast<int>(height)); + kernel.SetArgument(11, static_cast<int>(width)); + kernel.SetArgument(12, static_cast<int>(channels)); + kernel.SetArgument(13, static_cast<int>(kernel_h)); + kernel.SetArgument(14, static_cast<int>(kernel_w)); + kernel.SetArgument(15, static_cast<int>(pad_h)); + kernel.SetArgument(16, static_cast<int>(pad_w)); + kernel.SetArgument(17, static_cast<int>(stride_h)); + kernel.SetArgument(18, static_cast<int>(stride_w)); + kernel.SetArgument(19, static_cast<int>(dilation_h)); + kernel.SetArgument(20, static_cast<int>(dilation_w)); + kernel.SetArgument(21, static_cast<int>(output_h)); + kernel.SetArgument(22, static_cast<int>(output_w)); + } + + // Computes the global and local thread sizes + const auto m_ceiled = Ceil(num_patches, db_["WGD"]); + const auto n_ceiled = Ceil(num_kernels, db_["WGD"]); + const auto global = std::vector<size_t>{ + (m_ceiled * db_["MDIMCD"]) / db_["WGD"], + (n_ceiled * db_["NDIMCD"]) / db_["WGD"], + batch_count + }; + const auto local = std::vector<size_t>{db_["MDIMCD"], db_["NDIMCD"], 1}; + + // Launches the kernel + RunKernel(kernel, queue_, device_, global, local, event_); +} + +// ================================================================================================= + +// Compiles the templated class +template class Xconvgemm<half>; +template class Xconvgemm<float>; +template class Xconvgemm<double>; +template class Xconvgemm<float2>; +template class Xconvgemm<double2>; + +// ================================================================================================= +} // namespace clblast diff --git a/src/routines/levelx/xconvgemm.hpp b/src/routines/levelx/xconvgemm.hpp new file mode 
100644 index 00000000..9d11ccee --- /dev/null +++ b/src/routines/levelx/xconvgemm.hpp @@ -0,0 +1,53 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren <www.cedricnugteren.nl> +// +// This file implements the Xconvgemm routine. The precision is implemented as a template argument. +// This implements batched convolution of a 4D input 'image' tensor, a 3D input 'kernel' matrix, +// resulting in a 4D output 'result' tensor. +// +// ================================================================================================= + +#ifndef CLBLAST_ROUTINES_XCONVGEMM_H_ +#define CLBLAST_ROUTINES_XCONVGEMM_H_ + +#include "routine.hpp" + +namespace clblast { +// ================================================================================================= + +// See comment at top of file for a description of the class +template <typename T> +class Xconvgemm: public Routine { + public: + + // Constructor + enum class ConvGemmMethod {kWithIm2Col, kSingleKernel}; + Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM", + const ConvGemmMethod method = ConvGemmMethod::kWithIm2Col); + + // Templated-precision implementation of the routine + void DoConvgemm(const size_t channels, const size_t height, const size_t width, + const size_t kernel_h, const size_t kernel_w, + const size_t pad_h, const size_t pad_w, + const size_t stride_h, const size_t stride_w, + const size_t dilation_h, const size_t dilation_w, + const size_t num_kernels, const size_t batch_count, + const Buffer<T> &im_buffer, const size_t im_offset, + const Buffer<T> &kernel_buffer, const size_t kernel_offset, + const Buffer<T> &result_buffer, const size_t 
result_offset); + + private: + const ConvGemmMethod method_; +}; + +// ================================================================================================= +} // namespace clblast + +// CLBLAST_ROUTINES_XCONVGEMM_H_ +#endif diff --git a/src/routines/routines.hpp b/src/routines/routines.hpp index 2ab16a75..e080ed47 100644 --- a/src/routines/routines.hpp +++ b/src/routines/routines.hpp @@ -70,6 +70,7 @@ #include "routines/levelx/xhad.hpp" #include "routines/levelx/xomatcopy.hpp" #include "routines/levelx/xim2col.hpp" +#include "routines/levelx/xconvgemm.hpp" #include "routines/levelx/xaxpybatched.hpp" #include "routines/levelx/xgemmbatched.hpp" #include "routines/levelx/xgemmstridedbatched.hpp" diff --git a/src/utilities/utilities.hpp b/src/utilities/utilities.hpp index 37d71794..16a241af 100644 --- a/src/utilities/utilities.hpp +++ b/src/utilities/utilities.hpp @@ -87,6 +87,7 @@ constexpr auto kArgImaxOffset = "offimax"; constexpr auto kArgAlpha = "alpha"; constexpr auto kArgBeta = "beta"; constexpr auto kArgBatchCount = "batch_num"; +constexpr auto kArgNumKernels = "num_kernels"; // Constants for im2col constexpr auto kArgChannels = "channels"; @@ -199,7 +200,7 @@ struct Arguments { size_t imax_offset = 0; T alpha = ConstantOne<T>(); T beta = ConstantOne<T>(); - // Arguments for im2col + // Arguments for im2col and convgemm size_t channels = 1; size_t height = 1; size_t width = 1; @@ -211,6 +212,7 @@ struct Arguments { size_t stride_w = 1; size_t dilation_h = 1; size_t dilation_w = 1; + size_t num_kernels = 1; // Batch-specific arguments size_t batch_count = 1; std::vector<size_t> x_offsets; // = {0}; diff --git a/test/correctness/routines/levelx/xconvgemm.cpp b/test/correctness/routines/levelx/xconvgemm.cpp new file mode 100644 index 00000000..a120baa7 --- /dev/null +++ b/test/correctness/routines/levelx/xconvgemm.cpp @@ -0,0 +1,24 @@ + +// ================================================================================================= +// This file 
is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren <www.cedricnugteren.nl> +// +// ================================================================================================= + +#include "test/correctness/testblas.hpp" +#include "test/routines/levelx/xconvgemm.hpp" + +// Main function (not within the clblast namespace) +int main(int argc, char *argv[]) { + auto errors = size_t{0}; + errors += clblast::RunTests<clblast::TestXconvgemm<float>, float, float>(argc, argv, false, "SCONVGEMM"); + errors += clblast::RunTests<clblast::TestXconvgemm<double>, double, double>(argc, argv, true, "DCONVGEMM"); + errors += clblast::RunTests<clblast::TestXconvgemm<clblast::half>, clblast::half, clblast::half>(argc, argv, true, "HCONVGEMM"); + if (errors > 0) { return 1; } else { return 0; } +} + +// ================================================================================================= diff --git a/test/correctness/testblas.hpp b/test/correctness/testblas.hpp index 54b2d6f8..e9a995fd 100644 --- a/test/correctness/testblas.hpp +++ b/test/correctness/testblas.hpp @@ -60,6 +60,9 @@ class TestBlas: public Tester<T,U> { static const std::vector<size_t> kDilationSizes; static const std::vector<size_t> kKernelSizes; static const std::vector<size_t> kBatchCounts; + static const std::vector<size_t> kNumKernels; + static const std::vector<size_t> kStrideValues; + static const std::vector<size_t> kChannelValues; const std::vector<size_t> kOffsets; const std::vector<U> kAlphaValues; const std::vector<U> kBetaValues; @@ -136,6 +139,9 @@ template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kBatc template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kPadSizes = { 0, 1 }; template <typename T, typename U> const std::vector<size_t> 
TestBlas<T,U>::kDilationSizes = { 1, 2 }; template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kKernelSizes = { 1, 3 }; +template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kNumKernels = { 1, 6 }; +template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kStrideValues = { 1, 3 }; +template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kChannelValues = { 1, 2 }; // Test settings for the invalid tests template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kInvalidIncrements = { 0, 1 }; @@ -241,6 +247,7 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na auto dilation_hs = std::vector<size_t>{args.dilation_h}; auto dilation_ws = std::vector<size_t>{args.dilation_w}; auto batch_counts = std::vector<size_t>{args.batch_count}; + auto num_kernelss = std::vector<size_t>{args.num_kernels}; auto x_sizes = std::vector<size_t>{args.x_size}; auto y_sizes = std::vector<size_t>{args.y_size}; auto a_sizes = std::vector<size_t>{args.a_size}; @@ -284,18 +291,19 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na if (option == kArgImaxOffset) { imax_offsets = tester.kOffsets; } if (option == kArgAlpha) { alphas = tester.kAlphaValues; } if (option == kArgBeta) { betas = tester.kBetaValues; } - if (option == kArgChannels) { channelss = tester.kKernelSizes; } + if (option == kArgChannels) { channelss = tester.kChannelValues; } if (option == kArgHeight) { heights = tester.kMatrixDims; } if (option == kArgWidth) { widths = tester.kMatrixDims; } if (option == kArgKernelH) { kernel_hs = tester.kKernelSizes; } if (option == kArgKernelW) { kernel_ws = tester.kKernelSizes; } if (option == kArgPadH) { pad_hs = tester.kPadSizes; } if (option == kArgPadW) { pad_ws = tester.kPadSizes; } - if (option == kArgStrideH) { stride_hs = tester.kKernelSizes; } - if (option == kArgStrideW) { stride_ws = tester.kKernelSizes; } + if (option == 
kArgStrideH) { stride_hs = tester.kStrideValues; } + if (option == kArgStrideW) { stride_ws = tester.kStrideValues; } if (option == kArgDilationH) { dilation_hs = tester.kDilationSizes; } if (option == kArgDilationW) { dilation_ws = tester.kDilationSizes; } if (option == kArgBatchCount) { batch_counts = tester.kBatchCounts; } + if (option == kArgNumKernels) { num_kernelss = tester.kNumKernels; } if (option == kArgXOffset) { x_sizes = tester.kVecSizes; } if (option == kArgYOffset) { y_sizes = tester.kVecSizes; } @@ -350,8 +358,10 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na for (auto &dilation_h: dilation_hs) { r_args.dilation_h = dilation_h; for (auto &dilation_w: dilation_ws) { r_args.dilation_w = dilation_w; for (auto &batch_count: batch_counts) { r_args.batch_count = batch_count; - C::SetSizes(r_args, tester.queue_); - regular_test_vector.push_back(r_args); + for (auto &num_kernels: num_kernelss) { r_args.num_kernels = num_kernels; + C::SetSizes(r_args, tester.queue_); + regular_test_vector.push_back(r_args); + } } } } diff --git a/test/correctness/tester.cpp b/test/correctness/tester.cpp index d6a346a6..daa43f26 100644 --- a/test/correctness/tester.cpp +++ b/test/correctness/tester.cpp @@ -370,6 +370,7 @@ std::string Tester<T,U>::GetOptionsString(const Arguments<U> &args) { if (o == kArgChannels) { result += kArgChannels + equals + ToString(args.channels) + " "; } if (o == kArgHeight) { result += kArgHeight + equals + ToString(args.height) + " "; } if (o == kArgWidth) { result += kArgWidth + equals + ToString(args.width) + " "; } + if (o == kArgNumKernels){result += kArgNumKernels + equals + ToString(args.num_kernels) + " "; } if (o == kArgKernelH) { result += kArgKernelH + equals + ToString(args.kernel_h) + " "; } if (o == kArgKernelW) { result += kArgKernelW + equals + ToString(args.kernel_w) + " "; } if (o == kArgPadH) { result += kArgPadH + equals + ToString(args.pad_h) + " "; } diff --git a/test/performance/client.cpp 
b/test/performance/client.cpp index e2d1a6c7..377e0140 100644 --- a/test/performance/client.cpp +++ b/test/performance/client.cpp @@ -106,7 +106,7 @@ Arguments<U> Client<T,U>::ParseArguments(int argc, char *argv[], const size_t le if (o == kArgAlpha) { args.alpha = GetArgument(command_line_args, help, kArgAlpha, GetScalar<U>()); } if (o == kArgBeta) { args.beta = GetArgument(command_line_args, help, kArgBeta, GetScalar<U>()); } - // Arguments for im2col + // Arguments for im2col and convgemm if (o == kArgChannels) { args.channels = GetArgument(command_line_args, help, kArgChannels, size_t{64}); } if (o == kArgHeight) { args.height = GetArgument(command_line_args, help, kArgHeight, size_t{64}); } if (o == kArgWidth) { args.width = GetArgument(command_line_args, help, kArgWidth, size_t{64}); } @@ -118,6 +118,7 @@ Arguments<U> Client<T,U>::ParseArguments(int argc, char *argv[], const size_t le if (o == kArgStrideW) { args.stride_w = GetArgument(command_line_args, help, kArgStrideW, size_t{1}); } if (o == kArgDilationH) { args.dilation_h = GetArgument(command_line_args, help, kArgDilationH, size_t{1}); } if (o == kArgDilationW) { args.dilation_w = GetArgument(command_line_args, help, kArgDilationW, size_t{1}); } + if (o == kArgNumKernels){ args.num_kernels = GetArgument(command_line_args, help, kArgNumKernels, size_t{1}); } } // These are the options common to all routines @@ -446,6 +447,7 @@ void Client<T,U>::PrintTableRow(const Arguments<U>& args, else if (o == kArgStrideW) {integers.push_back(args.stride_w); } else if (o == kArgDilationH) {integers.push_back(args.dilation_h); } else if (o == kArgDilationW) {integers.push_back(args.dilation_w); } + else if (o == kArgNumKernels){integers.push_back(args.num_kernels); } } auto strings = std::vector<std::string>{}; for (auto &o: options_) { diff --git a/test/performance/routines/levelx/xconvgemm.cpp b/test/performance/routines/levelx/xconvgemm.cpp new file mode 100644 index 00000000..2e255f8c --- /dev/null +++ 
b/test/performance/routines/levelx/xconvgemm.cpp @@ -0,0 +1,31 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren <www.cedricnugteren.nl> +// +// ================================================================================================= + +#include "test/performance/client.hpp" +#include "test/routines/levelx/xconvgemm.hpp" + +// Main function (not within the clblast namespace) +int main(int argc, char *argv[]) { + const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv); + switch(clblast::GetPrecision(command_line_args, clblast::Precision::kSingle)) { + case clblast::Precision::kHalf: + clblast::RunClient<clblast::TestXconvgemm<clblast::half>, clblast::half, clblast::half>(argc, argv); break; + case clblast::Precision::kSingle: + clblast::RunClient<clblast::TestXconvgemm<float>, float, float>(argc, argv); break; + case clblast::Precision::kDouble: + clblast::RunClient<clblast::TestXconvgemm<double>, double, double>(argc, argv); break; + case clblast::Precision::kComplexSingle: throw std::runtime_error("Unsupported precision mode"); + case clblast::Precision::kComplexDouble: throw std::runtime_error("Unsupported precision mode"); + } + return 0; +} + +// ================================================================================================= diff --git a/test/routines/levelx/xconvgemm.hpp b/test/routines/levelx/xconvgemm.hpp new file mode 100644 index 00000000..7fa4e701 --- /dev/null +++ b/test/routines/levelx/xconvgemm.hpp @@ -0,0 +1,243 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. 
The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren <www.cedricnugteren.nl> +// +// This file implements a class with static methods to describe the Xconvgemm routine. Examples of +// such 'descriptions' are how to calculate the size of a buffer or how to run the routine. These +// static methods are used by the correctness tester and the performance tester. +// +// ================================================================================================= + +#ifndef CLBLAST_TEST_ROUTINES_XCONVGEMM_H_ +#define CLBLAST_TEST_ROUTINES_XCONVGEMM_H_ + +#include "test/routines/common.hpp" + +namespace clblast { +// ================================================================================================= + +// See comment at top of file for a description of the class +template <typename T> +class TestXconvgemm { +public: + + // The BLAS level: 4 for the extra routines + static size_t BLASLevel() { return 4; } + + // The list of arguments relevant for this routine + static std::vector<std::string> GetOptions() { + return {kArgChannels, kArgHeight, kArgWidth, kArgKernelH, kArgKernelW, kArgPadH, kArgPadW, + kArgStrideH, kArgStrideW, kArgDilationH, kArgDilationW, kArgNumKernels, kArgBatchCount, + kArgAOffset, kArgBOffset, kArgCOffset}; + } + static std::vector<std::string> BuffersIn() { return {kBufMatA, kBufMatB, kBufMatC}; } + static std::vector<std::string> BuffersOut() { return {kBufMatC}; } + + // Describes how to obtain the sizes of the buffers + static size_t OutputHeight(const Arguments<T> &args) { + const auto size = args.height + 2 * args.pad_h; + const auto padding = args.dilation_h * (args.kernel_h - 1) + 1; + if (size >= padding) { return (size - padding) / args.stride_h + 1; } + return 1; + } + static size_t OutputWidth(const Arguments<T> &args) { + const auto size = 
args.width + 2 * args.pad_w; + const auto padding = args.dilation_w * (args.kernel_w - 1) + 1; + if (size >= padding) { return (size - padding) / args.stride_w + 1; } + return 1; + } + static size_t NumPatches(const Arguments<T> &args) { + return OutputHeight(args) * OutputWidth(args) * args.channels; + } + static size_t GetSizeA(const Arguments<T> &args) { // 4D: NCHW == batch-channel-height-width + return args.batch_count * args.channels * args.height * args.width + args.a_offset; + } + static size_t GetSizeB(const Arguments<T> &args) { // 4D: KCHW == kernel-channel-height-width + return args.num_kernels * args.channels * args.kernel_h * args.kernel_w + args.b_offset; + } + static size_t GetSizeC(const Arguments<T> &args) { // 4D: NCHW == batch-channel-height-width + return args.batch_count * args.num_kernels * OutputHeight(args) * OutputWidth(args) + args.c_offset; + } + + // Describes how to set the sizes of all the buffers + static void SetSizes(Arguments<T> &args, Queue&) { + args.a_size = GetSizeA(args); + args.b_size = GetSizeB(args); + args.c_size = GetSizeC(args); + } + + // Describes what the default values of the leading dimensions of the matrices are + static size_t DefaultLDA(const Arguments<T> &) { return 1; } // N/A for this routine + static size_t DefaultLDB(const Arguments<T> &) { return 1; } // N/A for this routine + static size_t DefaultLDC(const Arguments<T> &) { return 1; } // N/A for this routine + + // Describes which transpose options are relevant for this routine + using Transposes = std::vector<Transpose>; + static Transposes GetATransposes(const Transposes &) { return {}; } // N/A for this routine + static Transposes GetBTransposes(const Transposes &) { return {}; } // N/A for this routine + + // Describes how to prepare the input data + static void PrepareData(const Arguments<T>&, Queue&, const int, std::vector<T>&, + std::vector<T>&, std::vector<T>&, std::vector<T>&, std::vector<T>&, + std::vector<T>&, std::vector<T>&) {} // N/A for 
this routine + + // Describes how to run the CLBlast routine + static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) { +#ifdef OPENCL_API + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Convgemm<T>(args.channels, args.height, args.width, + args.kernel_h, args.kernel_w, + args.pad_h, args.pad_w, + args.stride_h, args.stride_w, + args.dilation_h, args.dilation_w, + args.num_kernels, args.batch_count, + buffers.a_mat(), args.a_offset, + buffers.b_mat(), args.b_offset, + buffers.c_mat(), args.c_offset, + &queue_plain, &event); + if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); } +#elif CUDA_API + auto status = Convgemm<T>(args.channels, args.height, args.width, + args.kernel_h, args.kernel_w, + args.pad_h, args.pad_w, + args.stride_h, args.stride_w, + args.dilation_h, args.dilation_w, + args.num_kernels, args.batch_count, + buffers.a_mat(), args.a_offset, + buffers.b_mat(), args.b_offset, + buffers.c_mat(), args.c_offset, + queue.GetContext()(), queue.GetDevice()()); + cuStreamSynchronize(queue()); +#endif + return status; + } + + // Describes how to run a naive version of the routine (for correctness/performance comparison). + // Note that a proper clBLAS or CPU BLAS comparison is not available for non-BLAS routines. 
+ static StatusCode RunReference1(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) { + auto buffers_host = BuffersHost<T>(); + DeviceToHost(args, buffers, buffers_host, queue, BuffersIn()); + const auto status = RunReference(args, buffers_host); + HostToDevice(args, buffers, buffers_host, queue, BuffersOut()); + return status; + } + + static StatusCode RunReference2(const Arguments<T> &args, BuffersHost<T> &buffers_host, Queue&) { + return RunReference(args, buffers_host); + } + static StatusCode RunReference3(const Arguments<T> &, BuffersCUDA<T> &, Queue &) { + return StatusCode::kUnknownError; + } + + // Describes how to download the results of the computation (more importantly: which buffer) + static std::vector<T> DownloadResult(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) { + std::vector<T> result(args.c_size, static_cast<T>(0)); + buffers.c_mat.Read(queue, args.c_size, result); + return result; + } + + // Describes how to compute the indices of the result buffer + static size_t ResultID1(const Arguments<T> &args) { return OutputHeight(args) * OutputWidth(args); } + static size_t ResultID2(const Arguments<T> &args) { return args.num_kernels * args.batch_count; } + static size_t GetResultIndex(const Arguments<T> &args, const size_t id1, const size_t id2) { + return id1 + OutputHeight(args) * OutputWidth(args) * id2 + args.c_offset; + } + + // Describes how to compute performance metrics + static size_t GetFlops(const Arguments<T> &args) { + const auto patch_size = args.kernel_h * args.kernel_w * args.channels; + const auto num_patches = OutputHeight(args) * OutputWidth(args); + return args.batch_count * 2 * num_patches * args.num_kernels * patch_size; + } + static size_t GetBytes(const Arguments<T> &args) { + return (GetSizeA(args) + GetSizeB(args) + GetSizeC(args)) * sizeof(T); + } +}; + +// ================================================================================================= + +template <typename T> +StatusCode 
RunReference(const Arguments<T> &args, BuffersHost<T> &buffers_host) { + const auto output_h = TestXconvgemm<T>::OutputHeight(args); + const auto output_w = TestXconvgemm<T>::OutputWidth(args); + for (auto batch_id = size_t{0}; batch_id < args.batch_count; ++batch_id) { + for (auto co_id = size_t{0}; co_id < args.num_kernels; ++co_id) { // output channels == num-kernels + for (auto ho_id = size_t{0}; ho_id < output_h; ++ho_id) { // image height + for (auto wo_id = size_t{0}; wo_id < output_w; ++wo_id) { // image width + auto result = ConstantZero<T>(); + + // 3D convolution + for (auto ci_id = size_t{0}; ci_id < args.channels; ++ci_id) { // input channels + for (auto kh_id = size_t{0}; kh_id < args.kernel_h; ++kh_id) { // kernel height + for (auto kw_id = size_t{0}; kw_id < args.kernel_w; ++kw_id) { // kernel width + + // Retrieves the value from the input image + const auto hi_id = kh_id * args.dilation_h + args.stride_h * ho_id - args.pad_h; + const auto wi_id = kw_id * args.dilation_w + args.stride_w * wo_id - args.pad_w; + if (hi_id >= 0 && hi_id < args.height && + wi_id >= 0 && wi_id < args.width) { + const auto input_index = wi_id + args.width * ( + hi_id + args.height * ( + ci_id + args.channels * ( + batch_id))); + const auto input_value = buffers_host.a_mat[input_index + args.a_offset]; + + // Multiplies with the kernel tensor + const auto kernel_index = kw_id + args.kernel_w * ( + kh_id + args.kernel_h * ( + ci_id + args.channels * ( + co_id))); + const auto kernel_value = buffers_host.b_mat[kernel_index + args.b_offset]; + result += input_value * kernel_value; + + } + } + } + } + + // Sets the output value (NCHW == batch-channel-height-width) + const auto output_index = wo_id + output_w * ( + ho_id + output_h * ( + co_id + args.num_kernels * ( + batch_id))); + buffers_host.c_mat[output_index + args.c_offset] = result; + } + } + } + } + return StatusCode::kSuccess; +} + +// Half-precision version calling the above reference implementation after 
conversions +template <> +StatusCode RunReference<half>(const Arguments<half> &args, BuffersHost<half> &buffers_host) { + auto a_buffer2 = HalfToFloatBuffer(buffers_host.a_mat); + auto b_buffer2 = HalfToFloatBuffer(buffers_host.b_mat); + auto c_buffer2 = HalfToFloatBuffer(buffers_host.c_mat); + auto dummy = std::vector<float>(0); + auto buffers2 = BuffersHost<float>{dummy, dummy, a_buffer2, b_buffer2, c_buffer2, dummy, dummy}; + auto args2 = Arguments<float>(); + args2.a_size = args.a_size; args2.b_size = args.b_size; args2.c_size = args.c_size; + args2.channels = args.channels; args2.height = args.height; args2.width = args.width; + args2.kernel_h = args.kernel_h; args2.kernel_w = args.kernel_w; + args2.pad_h = args.pad_h; args2.pad_w = args.pad_w; + args2.stride_h = args.stride_h; args2.stride_w = args.stride_w; + args2.dilation_h = args.dilation_h; args2.dilation_w = args.dilation_w; + args2.num_kernels = args.num_kernels; args2.batch_count = args.batch_count; + args2.a_offset = args.a_offset; args2.b_offset = args.b_offset; args2.c_offset = args.c_offset; + auto status = RunReference(args2, buffers2); + FloatToHalfBuffer(buffers_host.c_mat, buffers2.c_mat); + return status; +} + +// ================================================================================================= +} // namespace clblast + +// CLBLAST_TEST_ROUTINES_XCONVGEMM_H_ +#endif