From 5d87abf78080de8e844ff93822da49d2c8a7deb3 Mon Sep 17 00:00:00 2001
From: Cedric Nugteren
Date: Mon, 21 May 2018 11:28:11 +0200
Subject: Added method selection option to switch between im2col and
 single-kernel approach for convgemm

---
 src/kernels/levelx/xconvgemm_part1.opencl |   4 +-
 src/kernels/levelx/xconvgemm_part2.opencl |  44 +++++++++----
 src/routines/levelx/xconvgemm.cpp         | 105 +++++++++++++++++------------
 src/routines/levelx/xconvgemm.hpp         |   7 +-
 4 files changed, 102 insertions(+), 58 deletions(-)

diff --git a/src/kernels/levelx/xconvgemm_part1.opencl b/src/kernels/levelx/xconvgemm_part1.opencl
index ac13219f..6f870ec0 100644
--- a/src/kernels/levelx/xconvgemm_part1.opencl
+++ b/src/kernels/levelx/xconvgemm_part1.opencl
@@ -9,6 +9,8 @@
 //
 // This file contains the an implementation of 3D convolution on a 4D image using GEMM kernels. It
 // uses parameters from the direct GEMM kernel. This is the part with the loads from memory (1/2).
+// This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running
+// the im2col kernel to create a 'col' temporary matrix.
 //
 // =================================================================================================
@@ -17,7 +19,7 @@
 R"(
 // =================================================================================================
 
-#if defined(ROUTINE_CONVGEMM)
+#if defined(ROUTINE_CONVGEMM) && !defined(CONVGEMM_WITH_IM2COL)
 
 // Loads global off-chip memory into thread-private register files. This function is specific for
 // loading the image input tensor. This includes a bounds check.
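
Note: with the guard above, the image-loading helpers in part 1 are only compiled for the
single-kernel case; when CONVGEMM_WITH_IM2COL is defined, the host runs the regular im2col kernel
first and the ConvGEMM kernel reads the precomputed 'col' matrix instead. The following is a
scalar C++ sketch of the bounds-checked image load that GlobalToPrivateCheckedImage performs; the
function name and the decomposition order of the patch index are illustrative assumptions, not the
kernel's actual layout:

    #include <vector>

    // Hypothetical scalar model of the bounds-checked image load: maps one
    // (output position, patch element) pair back to the padded/dilated input.
    float LoadImageChecked(const std::vector<float> &image, // one batch, CHW layout
                           int input_h, int input_w,
                           int kernel_h, int kernel_w,
                           int pad_h, int pad_w, int stride_h, int stride_w,
                           int dilation_h, int dilation_w,
                           int h_id, int w_id, int patch_index) {
      // Decompose the patch-dimension index into channel and kernel offsets
      // (assumed order: channel-major, then kernel row, then kernel column)
      const int kw = patch_index % kernel_w;
      const int kh = (patch_index / kernel_w) % kernel_h;
      const int c = patch_index / (kernel_w * kernel_h);
      // Map the output coordinate plus kernel offset back to input coordinates
      const int h = h_id * stride_h - pad_h + kh * dilation_h;
      const int w = w_id * stride_w - pad_w + kw * dilation_w;
      // The bounds check: reads from the padding region contribute zero
      if (h < 0 || h >= input_h || w < 0 || w >= input_w) { return 0.0f; }
      return image[(c * input_h + h) * input_w + w];
    }
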
diff --git a/src/kernels/levelx/xconvgemm_part2.opencl b/src/kernels/levelx/xconvgemm_part2.opencl
index 09eb45ea..f9b78974 100644
--- a/src/kernels/levelx/xconvgemm_part2.opencl
+++ b/src/kernels/levelx/xconvgemm_part2.opencl
@@ -9,6 +9,8 @@
 //
 // This file contains the an implementation of 3D convolution on a 4D image using GEMM kernels. It
 // uses parameters from the direct GEMM kernel. This part contains the main kernel (2/2).
+// This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running
+// the im2col kernel to create a 'col' temporary matrix.
 //
 // =================================================================================================
@@ -22,28 +24,37 @@
 R"(
 // =================================================================================================
 
 // ConvGEMM kernel
 __kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
 void Xconvgemm(const int num_patches, const int num_kernels, const int patch_size,
-               const __global realMD* restrict colgm, const int col_offset, const int col_stride,
                const __global realND* restrict kernelgm, const int kernel_offset,
                __global real* resultgm, const int result_offset, const int result_stride,
+#if defined(CONVGEMM_WITH_IM2COL)
+               const __global realMD* restrict colgm, const int col_offset, const int col_stride)
+#else
+               const __global realMD* restrict imagegm, const int image_offset,
                const int input_h, const int input_w, const int channels,
                const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
                const int stride_h, const int stride_w, const int dilation_h, const int dilation_w,
-               const __global realMD* restrict imagegm, const int image_offset,
-               const int output_h, const int output_w) {
+               const int output_h, const int output_w)
+#endif
+{
 
   // Batch offsets
   const int batch = get_group_id(2);
-  const int image_offset_batch = image_offset + channels * input_h * input_w * batch;
-  const int col_offset_batch = col_offset + col_stride * batch;
+  #if defined(CONVGEMM_WITH_IM2COL)
+    const int col_offset_batch = col_offset + col_stride * batch;
+  #else
+    const int image_offset_batch = image_offset + channels * input_h * input_w * batch;
+  #endif
   const int result_offset_batch = result_offset + result_stride * batch;
 
   __local real alm[WGD * (WGD + PADA)];
   __local real blm[WGD * (WGD + PADB)];
 
   // Extra pointers to scalar versions of global memory
-  const __global real* restrict colgms = (const __global real* restrict) colgm;
+  #if defined(CONVGEMM_WITH_IM2COL)
+    const __global real* restrict colgms = (const __global real* restrict) colgm;
+  #endif
   const __global real* restrict kernelgms = (const __global real* restrict) kernelgm;
 
   // Allocates workitem-private memory (registers)
@@ -63,12 +74,17 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz
     }
   }
 
-  // The faster version of GEMM is not allowed on the (incomplete) borders. Therefore, this section
-  // processes only the main parts: output blocks of WGD by WGD.
+  // Global m/n indices
   const int idm = get_local_id(0) * MWID + GetGroupID0() * WGD;
   const int idn = get_local_id(1) * NWID + GetGroupID1() * WGD;
-  const int w_id = idm % output_w;
-  const int h_id = idm / output_w;
+  #if !defined(CONVGEMM_WITH_IM2COL)
+    const int w_id = idm % output_w;
+    const int h_id = idm / output_w;
+  #endif
+
+  // The faster version of GEMM is not allowed on the (incomplete) borders. Therefore, this section
+  // processes only the main parts: output blocks of WGD by WGD.
+  #if defined(CONVGEMM_WITH_IM2COL) // TEMP: To be implemented for other case as well
   if ((idm < (num_patches/WGD)*WGD) && (idn < (num_kernels/WGD)*WGD)) {
 
     // Loops over all complete workgroup tiles (K-dimension)
@@ -155,7 +171,9 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz
 
   // Simple but slower version for the parts on the edge (incomplete tiles in M and N-dimensions)
   else {
-
+  #else // TEMP, to be implemented
+  { // TEMP, to be implemented
+  #endif // TEMP, to be implemented
     // Loops over all complete workgroup tiles (K-dimension)
     int kwg = 0;
     for (; kwg < (patch_size/WGD) * WGD; kwg+=WGD) {
@@ -207,10 +225,14 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz
       // Loads data: off-chip --> private
       #pragma unroll
       for (int _mi = 0; _mi < MWID; _mi += 1) {
+        #if defined(CONVGEMM_WITH_IM2COL)
+          apd[_mi] = GlobalToPrivateCheckedA(colgms, _mi, num_patches, col_offset_batch, idm, kwg, false, false, num_patches);
+        #else
         apd[_mi] = GlobalToPrivateCheckedImage(imagegm, image_offset_batch, h_id, w_id, kwg,
                                                input_h, input_w, channels, kernel_h, kernel_w,
                                                pad_h, pad_w, stride_h, stride_w,
                                                dilation_h, dilation_w);
+        #endif
       }
       #pragma unroll
       for (int _ni = 0; _ni < NWID; _ni += 1) {
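
Note: per batch, the tiled kernel above computes an ordinary GEMM over patches:
result (num_patches x num_kernels) = col (num_patches x patch_size) * kernel (patch_size x
num_kernels). A naive reference sketch follows; the column-major storage with leading dimensions
num_patches (A, C) and patch_size (B) is an assumption based on the TestMatrixA/B/C calls in the
routine below:

    #include <vector>

    // Naive per-batch reference of the ConvGEMM computation, column-major,
    // with leading dimensions num_patches (col, result) and patch_size (kern).
    void ConvGemmReference(int num_patches, int num_kernels, int patch_size,
                           const std::vector<float> &col,   // num_patches x patch_size
                           const std::vector<float> &kern,  // patch_size x num_kernels
                           std::vector<float> &result) {    // num_patches x num_kernels
      for (int idm = 0; idm < num_patches; ++idm) {    // m: output spatial position
        for (int idn = 0; idn < num_kernels; ++idn) {  // n: output channel
          float acc = 0.0f;
          for (int kwg = 0; kwg < patch_size; ++kwg) { // k: reduction over the patch
            acc += col[kwg * num_patches + idm] * kern[idn * patch_size + kwg];
          }
          result[idn * num_patches + idm] = acc;
        }
      }
    }
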
"#define CONVGEMM_WITH_IM2COL\n" : "", #include "../../kernels/level3/level3.opencl" , // separated in multiple parts to prevent C1091 in MSVC 2013 #include "../../kernels/level3/xgemm_direct_part1.opencl" @@ -33,7 +35,8 @@ Xconvgemm::Xconvgemm(Queue &queue, EventPointer event, const std::string &nam , // separated in multiple parts to prevent C1091 in MSVC 2013 #include "../../kernels/levelx/xconvgemm_part1.opencl" #include "../../kernels/levelx/xconvgemm_part2.opencl" - }) { + }), + method_(method) { } // ================================================================================================= @@ -70,26 +73,29 @@ void Xconvgemm::DoConvgemm(const size_t channels, const size_t height, const const auto patch_size = kernel_h * kernel_w * channels; const auto num_patches = output_h * output_w; - // Approach: im2col + GEMM + // Possible approach: im2col + GEMM // result = GEMM(im2col(image), kernel) - - // Temporary col matrix - const auto col_size = patch_size * num_patches * batch_count; - auto col_buffer = Buffer(context_, col_size); - - // Loops over each batch - for (auto batch_id = size_t{0}; batch_id < batch_count; ++batch_id) { - - // im2col - const auto im_batch_offset = batch_id * channels * height * width + im_offset; - const auto col_batch_offset = batch_id * patch_size * num_patches; - auto im2col_event = Event(); - auto im2col = Xim2col(queue_, im2col_event.pointer()); - im2col.DoIm2col(channels, height, width, kernel_h, kernel_w, - pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, - im_buffer, im_batch_offset, - col_buffer, col_batch_offset); - im2col_event.WaitForCompletion(); + auto col_buffer = Buffer(context_, 0); // nullptr, will be optionally created later + if (method_ == ConvGemmMethod::kWithIm2Col) { + + // Temporary col matrix + const auto col_size = (method_ == ConvGemmMethod::kWithIm2Col) ? 
+    col_buffer = Buffer<T>(context_, col_size);
+
+    // Loops over each batch
+    for (auto batch_id = size_t{0}; batch_id < batch_count; ++batch_id) {
+
+      // im2col
+      const auto im_batch_offset = batch_id * channels * height * width + im_offset;
+      const auto col_batch_offset = batch_id * patch_size * num_patches;
+      auto im2col_event = Event();
+      auto im2col = Xim2col<T>(queue_, im2col_event.pointer());
+      im2col.DoIm2col(channels, height, width, kernel_h, kernel_w,
+                      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
+                      im_buffer, im_batch_offset,
+                      col_buffer, col_batch_offset);
+      im2col_event.WaitForCompletion();
+    }
   }
 
   // Strided batched GEMM: C (result) = alpha (1) * A (col) * B (kernel) + beta (0) * C (result)
@@ -99,7 +105,12 @@ void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const
   // Tests the matrices for validity
   TestMatrixB(patch_size, num_kernels, kernel_buffer, kernel_offset, patch_size);
   for (auto batch = size_t{0}; batch < batch_count; ++batch) {
-    TestMatrixA(num_patches, patch_size, col_buffer, col_stride * batch, num_patches);
+    if (method_ == ConvGemmMethod::kWithIm2Col) {
+      TestMatrixA(num_patches, patch_size, col_buffer, col_stride * batch, num_patches);
+    }
+    else {
+      // TODO: check for valid image tensor
+    }
     TestMatrixC(num_patches, num_kernels, result_buffer, result_offset + result_stride * batch,
                 num_patches);
   }
@@ -110,29 +121,33 @@ void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const
   kernel.SetArgument(0, static_cast<int>(num_patches));
   kernel.SetArgument(1, static_cast<int>(num_kernels));
   kernel.SetArgument(2, static_cast<int>(patch_size));
-  kernel.SetArgument(3, col_buffer());
-  kernel.SetArgument(4, static_cast<int>(0));
-  kernel.SetArgument(5, static_cast<int>(col_stride));
-  kernel.SetArgument(6, kernel_buffer());
-  kernel.SetArgument(7, static_cast<int>(kernel_offset));
-  kernel.SetArgument(8, result_buffer());
-  kernel.SetArgument(9, static_cast<int>(result_offset));
-  kernel.SetArgument(10, static_cast<int>(result_stride));
-  kernel.SetArgument(11, static_cast<int>(height));
-  kernel.SetArgument(12, static_cast<int>(width));
-  kernel.SetArgument(13, static_cast<int>(channels));
-  kernel.SetArgument(14, static_cast<int>(kernel_h));
-  kernel.SetArgument(15, static_cast<int>(kernel_w));
-  kernel.SetArgument(16, static_cast<int>(pad_h));
-  kernel.SetArgument(17, static_cast<int>(pad_w));
-  kernel.SetArgument(18, static_cast<int>(stride_h));
-  kernel.SetArgument(19, static_cast<int>(stride_w));
-  kernel.SetArgument(20, static_cast<int>(dilation_h));
-  kernel.SetArgument(21, static_cast<int>(dilation_w));
-  kernel.SetArgument(22, im_buffer());
-  kernel.SetArgument(23, static_cast<int>(im_offset));
-  kernel.SetArgument(24, static_cast<int>(output_h));
-  kernel.SetArgument(25, static_cast<int>(output_w));
+  kernel.SetArgument(3, kernel_buffer());
+  kernel.SetArgument(4, static_cast<int>(kernel_offset));
+  kernel.SetArgument(5, result_buffer());
+  kernel.SetArgument(6, static_cast<int>(result_offset));
+  kernel.SetArgument(7, static_cast<int>(result_stride));
+  if (method_ == ConvGemmMethod::kWithIm2Col) {
+    kernel.SetArgument(8, col_buffer());
+    kernel.SetArgument(9, static_cast<int>(0));
+    kernel.SetArgument(10, static_cast<int>(col_stride));
+  }
+  if (method_ == ConvGemmMethod::kSingleKernel) {
+    kernel.SetArgument(8, im_buffer());
+    kernel.SetArgument(9, static_cast<int>(im_offset));
+    kernel.SetArgument(10, static_cast<int>(height));
+    kernel.SetArgument(11, static_cast<int>(width));
+    kernel.SetArgument(12, static_cast<int>(channels));
+    kernel.SetArgument(13, static_cast<int>(kernel_h));
+    kernel.SetArgument(14, static_cast<int>(kernel_w));
+    kernel.SetArgument(15, static_cast<int>(pad_h));
+    kernel.SetArgument(16, static_cast<int>(pad_w));
+    kernel.SetArgument(17, static_cast<int>(stride_h));
+    kernel.SetArgument(18, static_cast<int>(stride_w));
+    kernel.SetArgument(19, static_cast<int>(dilation_h));
+    kernel.SetArgument(20, static_cast<int>(dilation_w));
+    kernel.SetArgument(21, static_cast<int>(output_h));
+    kernel.SetArgument(22, static_cast<int>(output_w));
+  }
 
   // Computes the global and local thread sizes
   const auto m_ceiled = Ceil(num_patches, db_["WGD"]);
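
Note: the routine uses output_h and output_w (computed earlier in DoConvgemm, outside the hunks
shown here). For reference, they satisfy the standard convolution output-size relation; this
helper is an illustrative sketch, not CLBlast's own function:

    #include <cstddef>

    // Standard convolution output size; e.g. output_h follows from height,
    // kernel_h, pad_h, stride_h and dilation_h.
    size_t ConvOutputSize(size_t input, size_t kernel, size_t pad,
                          size_t stride, size_t dilation) {
      const size_t dilated_kernel = dilation * (kernel - 1) + 1; // effective extent
      return (input + 2 * pad - dilated_kernel) / stride + 1;
    }
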
diff --git a/src/routines/levelx/xconvgemm.hpp b/src/routines/levelx/xconvgemm.hpp
index 01795ea8..ac27657f 100644
--- a/src/routines/levelx/xconvgemm.hpp
+++ b/src/routines/levelx/xconvgemm.hpp
@@ -27,7 +27,9 @@ class Xconvgemm: public Routine {
  public:
 
   // Constructor
-  Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM");
+  enum class ConvGemmMethod {kWithIm2Col, kSingleKernel};
+  Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM",
+            const ConvGemmMethod method = ConvGemmMethod::kSingleKernel);
 
   // Templated-precision implementation of the routine
   void DoConvgemm(const size_t channels, const size_t height, const size_t width,
@@ -39,6 +41,9 @@ class Xconvgemm: public Routine {
                   const Buffer<T> &im_buffer, const size_t im_offset,
                   const Buffer<T> &kernel_buffer, const size_t kernel_offset,
                   const Buffer<T> &result_buffer, const size_t result_offset);
+
+ private:
+  const ConvGemmMethod method_;
 };
 
 // =================================================================================================
--
cgit v1.2.3
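
Note: with this patch, a caller of the internal API selects the method at construction time;
kSingleKernel is the new default. A usage sketch follows — the sizes are placeholder values, and
the middle of the DoConvgemm parameter list (kernel/pad/stride/dilation sizes, num_kernels,
batch_count) is assumed from the routine body above, since the diff does not show it in full:

    #include "routines/levelx/xconvgemm.hpp"

    using namespace clblast;

    // Sketch: run ConvGEMM via the im2col-based method instead of the new
    // single-kernel default (all sizes are placeholder values).
    void RunConvGemmWithIm2col(Queue &queue,
                               const Buffer<float> &im_buffer,
                               const Buffer<float> &kernel_buffer,
                               Buffer<float> &result_buffer) {
      auto event = Event();
      auto routine = Xconvgemm<float>(queue, event.pointer(), "CONVGEMM",
                                      Xconvgemm<float>::ConvGemmMethod::kWithIm2Col);
      routine.DoConvgemm(3, 32, 32,         // channels, height, width
                         3, 3, 1, 1, 1, 1,  // kernel_h/w, pad_h/w, stride_h/w
                         1, 1,              // dilation_h/w
                         8, 1,              // num_kernels, batch_count
                         im_buffer, 0,      // im_offset
                         kernel_buffer, 0,  // kernel_offset
                         result_buffer, 0); // result_offset
      event.WaitForCompletion();
    }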