From 301dc280dfe75ff3c8b219f64aea83a6bf2f0c8d Mon Sep 17 00:00:00 2001 From: Koichi Akabe Date: Tue, 18 Dec 2018 13:56:00 +0900 Subject: Fix xconvgemm kernel and enable ConvGemmMethod::kSingleKernel --- src/kernels/levelx/xconvgemm_part1.opencl | 33 +++++++---- src/kernels/levelx/xconvgemm_part2.opencl | 94 +++++++++++++++++++++++++------ src/routines/levelx/xconvgemm.cpp | 10 ++-- src/routines/levelx/xconvgemm.hpp | 2 +- 4 files changed, 106 insertions(+), 33 deletions(-) diff --git a/src/kernels/levelx/xconvgemm_part1.opencl b/src/kernels/levelx/xconvgemm_part1.opencl index abdb5324..25ccba51 100644 --- a/src/kernels/levelx/xconvgemm_part1.opencl +++ b/src/kernels/levelx/xconvgemm_part1.opencl @@ -11,7 +11,6 @@ // uses parameters from the direct GEMM kernel. This is the part with the loads from memory (1/2). // This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running // the im2col kernel to create a 'col' temporary matrix. -// TODO: Currently only works with 'CONVGEMM_WITH_IM2COL' set // // ================================================================================================= @@ -30,12 +29,17 @@ INLINE_FUNC real GlobalToPrivateCheckedImage(const __global real* restrict image const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w) { + const int dilation_h, const int dilation_w, + const bool kernel_flip) { // Im2col indices const int kernel_2d_index = kwg % (kernel_h * kernel_w); - const int kw_id = kernel_2d_index % kernel_w; - const int kh_id = kernel_2d_index / kernel_w; + const int kw_id = (kernel_flip) + ? kernel_w - kernel_2d_index % kernel_w - 1 + : kernel_2d_index % kernel_w; + const int kh_id = (kernel_flip) + ? kernel_h - kernel_2d_index / kernel_w - 1 + : kernel_2d_index / kernel_w; const int c_id = kwg / (kernel_h * kernel_w); const int h_index = -pad_h + kh_id * dilation_h + stride_h * h_id; const int w_index = -pad_w + kw_id * dilation_w + stride_w * w_id; @@ -55,14 +59,15 @@ INLINE_FUNC real GlobalToPrivateCheckedImage(const __global real* restrict image // Loads global off-chip memory into local (shared) memory on-chip. This function is specific for // loading the image input tensor. This includes a bounds check. -INLINE_FUNC real GlobalToLocalCheckedImage(const __global realMD* restrict imagegm, LOCAL_PTR real* alm, +INLINE_FUNC real GlobalToLocalCheckedImage(const __global real* restrict imagegm, LOCAL_PTR real* alm, const int image_offset_batch, - const int h_id, const int w_id, const int kwg, + const int output_w, const int kwg, const int input_h, const int input_w, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w) { + const int dilation_h, const int dilation_w, + const bool kernel_flip) { #if MDIMCD == MDIMAD const int la0 = get_local_id(0); const int la1 = get_local_id(1); @@ -82,10 +87,17 @@ INLINE_FUNC real GlobalToLocalCheckedImage(const __global realMD* restrict image int idm = mg + GetGroupID0()*WGD; int idk = kg + kwg; + const int w_id = idm % output_w; + const int h_id = idm / output_w; + // Im2col indices const int kernel_2d_index = idk % (kernel_h * kernel_w); - const int kw_id = kernel_2d_index % kernel_w; - const int kh_id = kernel_2d_index / kernel_w; + const int kw_id = (kernel_flip) + ? 
kernel_w - kernel_2d_index % kernel_w - 1 + : kernel_2d_index % kernel_w; + const int kh_id = (kernel_flip) + ? kernel_h - kernel_2d_index / kernel_w - 1 + : kernel_2d_index / kernel_w; const int c_id = idk / (kernel_h * kernel_w); const int h_index = -pad_h + kh_id * dilation_h + stride_h * h_id; const int w_index = -pad_w + kw_id * dilation_w + stride_w * w_id; @@ -104,7 +116,8 @@ INLINE_FUNC real GlobalToLocalCheckedImage(const __global realMD* restrict image } } -#endif +#endif // defined(ROUTINE_CONVGEMM) && !defined(CONVGEMM_WITH_IM2COL) + // ================================================================================================= // End of the C++11 raw string literal diff --git a/src/kernels/levelx/xconvgemm_part2.opencl b/src/kernels/levelx/xconvgemm_part2.opencl index e0ac24a0..693cb120 100644 --- a/src/kernels/levelx/xconvgemm_part2.opencl +++ b/src/kernels/levelx/xconvgemm_part2.opencl @@ -11,7 +11,6 @@ // uses parameters from the direct GEMM kernel. This part contains the main kernel (2/2). // This uses "CONVGEMM_WITH_IM2COL" as a switch to select between direct convgemm or first running // the im2col kernel to create a 'col' temporary matrix. -// TODO: Currently only works with 'CONVGEMM_WITH_IM2COL' set // // ================================================================================================= @@ -23,20 +22,25 @@ R"( #if defined(ROUTINE_CONVGEMM) // ConvGEMM kernel +#if defined(CONVGEMM_WITH_IM2COL) __kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) void Xconvgemm(const int num_patches, const int num_kernels, const int patch_size, const __global realND* restrict kernelgm, const int kernel_offset, __global real* resultgm, const int result_offset, const int result_stride, -#if defined(CONVGEMM_WITH_IM2COL) const __global realMD* restrict colgm, const int col_offset, const int col_stride) #else - const __global realMD* restrict imagegm, const int image_offset, - const int input_h, const int input_w, const int channels, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int output_h, const int output_w) +INLINE_FUNC void Xconvgemm(const int num_patches, const int num_kernels, const int patch_size, + const __global realND* restrict kernelgm, const int kernel_offset, + __global real* resultgm, const int result_offset, const int result_stride, + const __global realMD* restrict imagegm, const int image_offset, + const int input_h, const int input_w, const int channels, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int output_h, const int output_w, + LOCAL_PTR real* alm, LOCAL_PTR real* blm, + const bool kernel_flip) #endif { @@ -49,12 +53,16 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz #endif const int result_offset_batch = result_offset + result_stride * batch; +#if defined(CONVGEMM_WITH_IM2COL) __local real alm[WGD * (WGD + PADA)]; __local real blm[WGD * (WGD + PADB)]; +#endif // Extra pointers to scalar versions of global memory #if defined(CONVGEMM_WITH_IM2COL) const __global real* restrict colgms = (const __global real* restrict) colgm; + #else + const __global real* restrict imagegms = (const __global real* restrict) imagegm; #endif const __global real* restrict kernelgms = (const __global real* restrict) kernelgm; @@ -100,10 +108,10 @@ void Xconvgemm(const 
int num_patches, const int num_kernels, const int patch_siz GlobalToLocalScalarA(colgms, alm, num_patches, col_offset_batch, kwg, false, false); } #else - GlobalToLocalCheckedImage(imagegm, alm, image_offset_batch, h_id, w_id, kwg, + GlobalToLocalCheckedImage(imagegms, alm, image_offset_batch, output_w, kwg, input_h, input_w, channels, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w); + dilation_h, dilation_w, kernel_flip); #endif if (patch_size % VWND == 0 && kernel_offset % VWND == 0) { GlobalToLocalDirectB(kernelgm, blm, patch_size, kernel_offset, kwg, true, false); @@ -151,10 +159,12 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz #if defined(CONVGEMM_WITH_IM2COL) apd[_mi] = GlobalToPrivateDirectA(colgms, _mi, num_patches, col_offset_batch, idm, kwg, false, false); #else - apd[_mi] = GlobalToPrivateCheckedImage(imagegm, image_offset_batch, h_id, w_id, kwg, + const int w_id = (idm + _mi) % output_w; + const int h_id = (idm + _mi) / output_w; + apd[_mi] = GlobalToPrivateCheckedImage(imagegms, image_offset_batch, h_id, w_id, kwg, input_h, input_w, channels, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w); + dilation_h, dilation_w, kernel_flip); #endif } #pragma unroll @@ -193,10 +203,10 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz #if defined(CONVGEMM_WITH_IM2COL) GlobalToLocalCheckedA(colgms, alm, num_patches, col_offset_batch, kwg, false, false, num_patches, patch_size); #else - GlobalToLocalCheckedImage(imagegm, alm, image_offset_batch, h_id, w_id, kwg, + GlobalToLocalCheckedImage(imagegms, alm, image_offset_batch, output_w, kwg, input_h, input_w, channels, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w); + dilation_h, dilation_w, kernel_flip); #endif GlobalToLocalCheckedB(kernelgms, blm, patch_size, kernel_offset, kwg, true, false, num_kernels, patch_size); barrier(CLK_LOCAL_MEM_FENCE); @@ -239,10 +249,12 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz #if defined(CONVGEMM_WITH_IM2COL) apd[_mi] = GlobalToPrivateCheckedA(colgms, _mi, num_patches, col_offset_batch, idm, kwg, false, false, num_patches); #else - apd[_mi] = GlobalToPrivateCheckedImage(imagegm, image_offset_batch, h_id, w_id, kwg, + const int w_id = (idm + _mi) % output_w; + const int h_id = (idm + _mi) / output_w; + apd[_mi] = GlobalToPrivateCheckedImage(imagegms, image_offset_batch, h_id, w_id, kwg, input_h, input_w, channels, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w); + dilation_h, dilation_w, kernel_flip); #endif } #pragma unroll @@ -272,7 +284,53 @@ void Xconvgemm(const int num_patches, const int num_kernels, const int patch_siz } } -#endif +#if !defined(CONVGEMM_WITH_IM2COL) +__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1))) +void XconvgemmFlip(const int num_patches, const int num_kernels, const int patch_size, + const __global realND* restrict kernelgm, const int kernel_offset, + __global real* resultgm, const int result_offset, const int result_stride, + const __global realMD* restrict imagegm, const int image_offset, + const int input_h, const int input_w, const int channels, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int output_h, const int output_w) { + const bool kernel_flip = true; + __local real alm[WGD * (WGD + PADA)]; + 
__local real blm[WGD * (WGD + PADB)];
+  Xconvgemm(num_patches, num_kernels, patch_size,
+            kernelgm, kernel_offset, resultgm, result_offset, result_stride,
+            imagegm, image_offset, input_h, input_w, channels, kernel_h, kernel_w,
+            pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
+            output_h, output_w, alm, blm, kernel_flip);
+}
+
+__kernel __attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
+void XconvgemmNormal(const int num_patches, const int num_kernels, const int patch_size,
+                     const __global realND* restrict kernelgm, const int kernel_offset,
+                     __global real* resultgm, const int result_offset, const int result_stride,
+                     const __global realMD* restrict imagegm, const int image_offset,
+                     const int input_h, const int input_w, const int channels,
+                     const int kernel_h, const int kernel_w,
+                     const int pad_h, const int pad_w,
+                     const int stride_h, const int stride_w,
+                     const int dilation_h, const int dilation_w,
+                     const int output_h, const int output_w) {
+  const bool kernel_flip = false;
+  __local real alm[WGD * (WGD + PADA)];
+  __local real blm[WGD * (WGD + PADB)];
+  Xconvgemm(num_patches, num_kernels, patch_size,
+            kernelgm, kernel_offset, resultgm, result_offset, result_stride,
+            imagegm, image_offset, input_h, input_w, channels, kernel_h, kernel_w,
+            pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
+            output_h, output_w, alm, blm, kernel_flip);
+}
+
+#endif // !defined(CONVGEMM_WITH_IM2COL)
+
+#endif // defined(ROUTINE_CONVGEMM)
+
 // =================================================================================================

 // End of the C++11 raw string literal
diff --git a/src/routines/levelx/xconvgemm.cpp b/src/routines/levelx/xconvgemm.cpp
index 88127b0f..8bd24f15 100644
--- a/src/routines/levelx/xconvgemm.cpp
+++ b/src/routines/levelx/xconvgemm.cpp
@@ -53,9 +53,6 @@ void Xconvgemm<T>::DoConvgemm(const KernelMode kernel_mode,
                               const Buffer<T> &kernel_buffer, const size_t kernel_offset,
                               const Buffer<T> &result_buffer, const size_t result_offset) {
-  // TODO: Implement single-kernel approach
-  assert(method_ == ConvGemmMethod::kWithIm2Col);
-
   // Tests for a valid batch count
   if (batch_count == 0) {
     throw BLASError(StatusCode::kInvalidBatchCount);
   }
@@ -121,7 +118,12 @@ void Xconvgemm<T>::DoConvgemm(const KernelMode kernel_mode,
   }

   // Retrieves the proper XgemmDirect kernel from the compiled binary
-  auto kernel = Kernel(program_, "Xconvgemm");
+  const std::string kernel_name = (method_ == ConvGemmMethod::kWithIm2Col)
+                                ? "Xconvgemm"
+                                : (kernel_mode == KernelMode::kConvolution)
+                                ? 
"XconvgemmFlip" + : "XconvgemmNormal"; + auto kernel = Kernel(program_, kernel_name); // Sets the kernel arguments kernel.SetArgument(0, static_cast(num_patches)); diff --git a/src/routines/levelx/xconvgemm.hpp b/src/routines/levelx/xconvgemm.hpp index 20cfff60..16082fc6 100644 --- a/src/routines/levelx/xconvgemm.hpp +++ b/src/routines/levelx/xconvgemm.hpp @@ -29,7 +29,7 @@ class Xconvgemm: public Routine { // Constructor enum class ConvGemmMethod {kWithIm2Col, kSingleKernel}; Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM", - const ConvGemmMethod method = ConvGemmMethod::kWithIm2Col); + const ConvGemmMethod method = ConvGemmMethod::kSingleKernel); // Templated-precision implementation of the routine void DoConvgemm(const KernelMode kernel_mode, -- cgit v1.2.3 From 1f0cd618247971f0803aff94ddf795d9e3d19428 Mon Sep 17 00:00:00 2001 From: Cedric Nugteren Date: Sun, 29 Jul 2018 15:07:51 +0200 Subject: Added first version of a tuner for the ConvGemm direct kernel --- CMakeLists.txt | 4 +- src/tuning/kernels/xconvgemm.cpp | 38 ++++++++ src/tuning/kernels/xconvgemm.hpp | 186 +++++++++++++++++++++++++++++++++++++++ src/tuning/tuning.cpp | 10 ++- src/tuning/tuning.hpp | 7 ++ 5 files changed, 242 insertions(+), 3 deletions(-) create mode 100644 src/tuning/kernels/xconvgemm.cpp create mode 100644 src/tuning/kernels/xconvgemm.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index fc3101b7..1356a509 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -213,6 +213,7 @@ endif() # Sets the supported routines and the used kernels. New routines and kernels should be added here. set(KERNELS copy_fast copy_pad transpose_fast transpose_pad xaxpy xdot xger xgemm xgemm_direct xgemv invert) +set(KERNELS_EXTRA xconvgemm) # kernels for which not to include a tuner in 'all tuners' target set(DATABASES copy pad padtranspose transpose xaxpy xdot xgemm xgemm_direct xgemv xgemv_fast xgemv_fast_rot xger invert gemm_routine trsv_routine) @@ -434,7 +435,8 @@ if(TUNERS) endif() # Adds tuning executables - foreach(KERNEL ${KERNELS}) + set(ALLKERNELS ${KERNELS} ${KERNELS_EXTRA}) + foreach(KERNEL ${ALLKERNELS}) add_executable(clblast_tuner_${KERNEL} ${TUNERS_COMMON} src/tuning/kernels/${KERNEL}.cpp) target_link_libraries(clblast_tuner_${KERNEL} ${API_LIBRARIES}) target_include_directories(clblast_tuner_${KERNEL} PUBLIC $ ${API_INCLUDE_DIRS}) diff --git a/src/tuning/kernels/xconvgemm.cpp b/src/tuning/kernels/xconvgemm.cpp new file mode 100644 index 00000000..15dfe829 --- /dev/null +++ b/src/tuning/kernels/xconvgemm.cpp @@ -0,0 +1,38 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file uses the auto-tuner to tune the convgemm kernels. 
+//
+// =================================================================================================
+
+#include "tuning/kernels/xconvgemm.hpp"
+
+// Shortcuts to the clblast namespace
+using half = clblast::half;
+using float2 = clblast::float2;
+using double2 = clblast::double2;
+
+// Function to tune a specific variation V (not within the clblast namespace)
+template <int V>
+void StartVariation(int argc, char *argv[]) {
+  const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
+  switch(clblast::GetPrecision(command_line_args)) {
+    case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::XConvGemmGetTunerDefaults, clblast::XConvGemmGetTunerSettings<half>, clblast::XConvGemmTestValidArguments<half>, clblast::XConvGemmSetConstraints, clblast::XConvGemmComputeLocalMemSize<half>, clblast::XConvGemmSetArguments<half>); break;
+    case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::XConvGemmGetTunerDefaults, clblast::XConvGemmGetTunerSettings<float>, clblast::XConvGemmTestValidArguments<float>, clblast::XConvGemmSetConstraints, clblast::XConvGemmComputeLocalMemSize<float>, clblast::XConvGemmSetArguments<float>); break;
+    case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::XConvGemmGetTunerDefaults, clblast::XConvGemmGetTunerSettings<double>, clblast::XConvGemmTestValidArguments<double>, clblast::XConvGemmSetConstraints, clblast::XConvGemmComputeLocalMemSize<double>, clblast::XConvGemmSetArguments<double>); break;
+  }
+}
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+  StartVariation<1>(argc, argv);
+  return 0;
+}
+
+// =================================================================================================
diff --git a/src/tuning/kernels/xconvgemm.hpp b/src/tuning/kernels/xconvgemm.hpp
new file mode 100644
index 00000000..83c93860
--- /dev/null
+++ b/src/tuning/kernels/xconvgemm.hpp
@@ -0,0 +1,186 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+//   Cedric Nugteren
+//
+// This file uses the auto-tuner to tune the ConvGemm kernels. These kernels are based on the GEMM
+// direct kernel and re-use its parameters, so this tuner is optional and intended for advanced
+// users. 
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Helper functions
+template <typename T>
+size_t OutputHeight(const Arguments<T> &args) {
+  const auto size = args.height + 2 * args.pad_h;
+  const auto padding = args.dilation_h * (args.kernel_h - 1) + 1;
+  if (size >= padding) { return (size - padding) / args.stride_h + 1; }
+  return 1;
+}
+template <typename T>
+size_t OutputWidth(const Arguments<T> &args) {
+  const auto size = args.width + 2 * args.pad_w;
+  const auto padding = args.dilation_w * (args.kernel_w - 1) + 1;
+  if (size >= padding) { return (size - padding) / args.stride_w + 1; }
+  return 1;
+}
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults XConvGemmGetTunerDefaults(const int) {
+  auto settings = TunerDefaults();
+  settings.options = {kArgChannels, kArgHeight, kArgWidth, kArgKernelH, kArgKernelW,
+                      kArgNumKernels, kArgBatchCount, kArgFraction};
+  settings.channels = 32;
+  settings.height = 64;
+  settings.width = 64;
+  settings.kernel_h = 3;
+  settings.kernel_w = 3;
+  settings.num_kernels = 32;
+  settings.default_batch_count = 16;
+  settings.default_fraction = 1.0;
+  settings.default_num_runs = 2;
+  return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings XConvGemmGetTunerSettings(const int, const Arguments<T> &args) {
+  auto settings = TunerSettings();
+
+  // Identification of the kernel
+  settings.kernel_family = "xconvgemm";
+  settings.kernel_name = "Xconvgemm";
+  settings.sources =
+"#define ROUTINE_CONVGEMM"
+#include "../src/kernels/level3/xgemm_direct_part1.opencl"
+#include "../src/kernels/level3/xgemm_direct_part2.opencl"
+#include "../src/kernels/level3/xgemm_direct_part3.opencl"
+#include "../src/kernels/levelx/xconvgemm_part1.opencl"
+#include "../src/kernels/levelx/xconvgemm_part2.opencl"
+  ;
+
+  // Helper variables
+  const auto patch_size = args.kernel_h * args.kernel_w * args.channels;
+  const auto num_patches = OutputHeight(args) * OutputWidth(args);
+
+  // Buffer sizes
+  settings.size_a = args.batch_count * args.channels * args.height * args.width;
+  settings.size_b = args.num_kernels * args.channels * args.kernel_h * args.kernel_w;
+  settings.size_a = args.batch_count * args.num_kernels * OutputHeight(args) * OutputWidth(args);
+
+  // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+  settings.inputs = {2, 3, 4};
+  settings.outputs = {4};
+
+  // Sets the base thread configuration
+  settings.global_size = {num_patches, args.num_kernels};
+  settings.global_size_ref = settings.global_size;
+  settings.local_size = {1, 1};
+  settings.local_size_ref = {8, 8};
+
+  // Transforms the thread configuration based on the parameters
+  settings.mul_local = {{"MDIMCD", "NDIMCD"}};
+  settings.mul_global = {{"MDIMCD", "NDIMCD"}};
+  settings.div_global = {{"WGD", "WGD"}};
+
+  // Sets the tuning parameters and their possible values
+  settings.parameters = {
+    {"WGD", {8, 16, 32}},
+    {"MDIMCD", {8, 16, 32}},
+    {"NDIMCD", {8, 16, 32}},
+    {"MDIMAD", {8, 16, 32}},
+    {"NDIMBD", {8, 16, 32}},
+    {"KWID", {1}},
+    {"VWMD", {1, 2, 4, 8}},
+    {"VWND", {1, 2, 4, 8}},
+    {"PADA", {0}},
+    {"PADB", {0}},
+  };
+
+  // Describes how to compute the performance metrics
+  settings.metric_amount = args.batch_count * 2 * num_patches * args.num_kernels * patch_size;
+  settings.performance_unit = 
"GFLOPS"; + + return settings; +} + +// Tests for valid arguments +template +void XConvGemmTestValidArguments(const int, const Arguments &) { } +std::vector XConvGemmSetConstraints(const int) { + auto constraints = std::vector(); + auto MultipleOfX = [] (std::vector v) { return IsMultiple(v[0], v[1]); }; + auto MultipleOfXMulY = [] (std::vector v) { return IsMultiple(v[0], v[1]*v[2]); }; + auto MultipleOfXMulYDivZ = [] (std::vector v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); }; + // Requirement for unrolling the WGD loop + constraints.push_back({MultipleOfX, {"WGD", "KWID"}}); + // Required for integer MWID and NWID + constraints.push_back({MultipleOfXMulY, {"WGD", "MDIMCD", "VWMD"}}); + constraints.push_back({MultipleOfXMulY, {"WGD", "NDIMCD", "VWND"}}); + // Required for integer MWIAD and NWIBD + constraints.push_back({MultipleOfXMulY, {"WGD", "MDIMAD", "VWMD"}}); + constraints.push_back({MultipleOfXMulY, {"WGD", "NDIMBD", "VWND"}}); + // WGD has to be a multiple of KDIMAD = ((MDIMCD*NDIMCD)/(MDIMAD)) and KDIMBD = (...) + constraints.push_back({MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "MDIMAD"}}); + constraints.push_back({MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "NDIMBD"}}); + + return constraints; +} +template +LocalMemSizeInfo XConvGemmComputeLocalMemSize(const int) { + return { + [] (std::vector v) -> size_t { + return GetBytes(PrecisionValue()) * ((v[0]*(v[0] + v[1]) + v[0]*(v[0] + v[2]))); + }, + {"WGD", "PADA", "PADB"} + }; +} + +// Sets the kernel's arguments +template +void XConvGemmSetArguments(const int, Kernel &kernel, const Arguments &args, std::vector>& buffers) { + const auto output_h = OutputHeight(args); + const auto output_w = OutputWidth(args); + const auto patch_size = args.kernel_h * args.kernel_w * args.channels; + const auto num_patches = output_h * output_w; + const auto result_stride = args.num_kernels * output_h * output_w; + kernel.SetArgument(0, static_cast(num_patches)); + kernel.SetArgument(1, static_cast(args.num_kernels)); + kernel.SetArgument(2, static_cast(patch_size)); + kernel.SetArgument(3, buffers[3]()); // 3 == B matrix ==> kernel buffer + kernel.SetArgument(4, 0); // c_offset + kernel.SetArgument(5, buffers[4]()); // 4 == C matrix ==> result buffer + kernel.SetArgument(6, 0); // c_offset + kernel.SetArgument(7, static_cast(result_stride)); + kernel.SetArgument(8, buffers[2]()); // 2 == A matrix ==> image buffer + kernel.SetArgument(9, 0); // c_offset + kernel.SetArgument(10, static_cast(args.height)); + kernel.SetArgument(11, static_cast(args.width)); + kernel.SetArgument(12, static_cast(args.channels)); + kernel.SetArgument(13, static_cast(args.kernel_h)); + kernel.SetArgument(14, static_cast(args.kernel_w)); + kernel.SetArgument(15, 0); // pad_h + kernel.SetArgument(16, 0); // pad_w + kernel.SetArgument(17, 1); // stride_h + kernel.SetArgument(18, 1); // stride_w + kernel.SetArgument(19, 1); // dilation_h + kernel.SetArgument(20, 1); // dilation_w + kernel.SetArgument(21, static_cast(output_h)); + kernel.SetArgument(22, static_cast(output_w)); +} + +// ================================================================================================= +} // namespace clblast diff --git a/src/tuning/tuning.cpp b/src/tuning/tuning.cpp index d382fb18..f76af774 100644 --- a/src/tuning/tuning.cpp +++ b/src/tuning/tuning.cpp @@ -122,8 +122,14 @@ void Tuner(int argc, char* argv[], const int V, if (o == kArgM) { args.m = GetArgument(command_line_args, help, kArgM, defaults.default_m); } if (o == kArgN) { args.n = 
GetArgument<size_t>(command_line_args, help, kArgN, defaults.default_n); }
     if (o == kArgK) { args.k = GetArgument<size_t>(command_line_args, help, kArgK, defaults.default_k); }
-    if (o == kArgAlpha) { args.alpha = GetArgument<T>(command_line_args, help, kArgAlpha, GetScalar<T>()); }
-    if (o == kArgBeta) { args.beta = GetArgument<T>(command_line_args, help, kArgBeta, GetScalar<T>()); }
+    if (o == kArgChannels) { args.channels = GetArgument<size_t>(command_line_args, help, kArgChannels, defaults.channels); }
+    if (o == kArgHeight) { args.height = GetArgument<size_t>(command_line_args, help, kArgHeight, defaults.height); }
+    if (o == kArgWidth) { args.width = GetArgument<size_t>(command_line_args, help, kArgWidth, defaults.width); }
+    if (o == kArgKernelH) { args.kernel_h = GetArgument<size_t>(command_line_args, help, kArgKernelH, defaults.kernel_h); }
+    if (o == kArgKernelW) { args.kernel_w = GetArgument<size_t>(command_line_args, help, kArgKernelW, defaults.kernel_w); }
+    if (o == kArgNumKernels) { args.num_kernels = GetArgument<size_t>(command_line_args, help, kArgNumKernels, defaults.num_kernels); }
+    if (o == kArgAlpha) { args.alpha = GetArgument<T>(command_line_args, help, kArgAlpha, GetScalar<T>()); }
+    if (o == kArgBeta) { args.beta = GetArgument<T>(command_line_args, help, kArgBeta, GetScalar<T>()); }
     if (o == kArgBatchCount) { args.batch_count = GetArgument<size_t>(command_line_args, help, kArgBatchCount, defaults.default_batch_count); }
   }
   args.fraction = GetArgument<double>(command_line_args, help, kArgFraction, defaults.default_fraction);
diff --git a/src/tuning/tuning.hpp b/src/tuning/tuning.hpp
index 37a042ff..61cc3bda 100644
--- a/src/tuning/tuning.hpp
+++ b/src/tuning/tuning.hpp
@@ -41,6 +41,13 @@ struct TunerDefaults {
   size_t default_m = 1;
   size_t default_n = 1;
   size_t default_k = 1;
+  size_t channels = 1;
+  size_t height = 1;
+  size_t width = 1;
+  size_t kernel_h = 3;
+  size_t kernel_w = 3;
+  size_t num_kernels = 1;
+  size_t batch_count = 1;

   // Other defaults
   size_t default_batch_count = 1;
-- 
cgit v1.2.3

From a8e6f813ddb7a8f608077d035583114fd2e763dd Mon Sep 17 00:00:00 2001
From: Koichi Akabe
Date: Tue, 18 Dec 2018 14:05:25 +0900
Subject: Fix the xconvgemm tuner
---
 src/tuning/kernels/xconvgemm.hpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/tuning/kernels/xconvgemm.hpp b/src/tuning/kernels/xconvgemm.hpp
index 83c93860..9ba70f5e 100644
--- a/src/tuning/kernels/xconvgemm.hpp
+++ b/src/tuning/kernels/xconvgemm.hpp
@@ -44,8 +44,8 @@ TunerDefaults XConvGemmGetTunerDefaults(const int) {
   settings.options = {kArgChannels, kArgHeight, kArgWidth, kArgKernelH, kArgKernelW,
                       kArgNumKernels, kArgBatchCount, kArgFraction};
   settings.channels = 32;
-  settings.height = 64;
-  settings.width = 64;
+  settings.height = 66;
+  settings.width = 66; // num_patches = 64x64 = 4096
   settings.kernel_h = 3;
   settings.kernel_w = 3;
   settings.num_kernels = 32;
@@ -62,7 +62,7 @@ TunerSettings XConvGemmGetTunerSettings(const int, const Arguments<T> &args) {

   // Identification of the kernel
   settings.kernel_family = "xconvgemm";
-  settings.kernel_name = "Xconvgemm";
+  settings.kernel_name = "XconvgemmNormal";
   settings.sources =
 "#define ROUTINE_CONVGEMM"
 #include "../src/kernels/level3/xgemm_direct_part1.opencl"
@@ -79,7 +79,7 @@ TunerSettings XConvGemmGetTunerSettings(const int, const Arguments<T> &args) {
   // Buffer sizes
   settings.size_a = args.batch_count * args.channels * args.height * args.width;
   settings.size_b = args.num_kernels * args.channels * args.kernel_h * args.kernel_w;
-  settings.size_a = args.batch_count * args.num_kernels * OutputHeight(args) * OutputWidth(args);
+  
settings.size_c = args.batch_count * args.num_kernels * OutputHeight(args) * OutputWidth(args); // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5) settings.inputs = {2, 3, 4}; -- cgit v1.2.3 From c0883cf2fe595e4335c7e69179d5072c4d530a48 Mon Sep 17 00:00:00 2001 From: Koichi Akabe Date: Tue, 18 Dec 2018 14:08:16 +0900 Subject: Update the documentation --- doc/routines.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/routines.md b/doc/routines.md index a4cb5e57..3ba8283e 100644 --- a/doc/routines.md +++ b/doc/routines.md @@ -94,7 +94,7 @@ In addition, some extra non-BLAS routines are also supported by CLBlast, classif | xOMATCOPY | ✔ | ✔ | ✔ | ✔ | ✔ | (Out-of-place copying/transposing/scaling of matrices) | xIM2COL | ✔ | ✔ | ✔ | ✔ | ✔ | (Image to column transform as used to express convolution as GEMM) | xCOL2IM | ✔ | ✔ | ✔ | ✔ | ✔ | (Column to image transform as used in machine learning) -| xCONVGEMM | ✔ | ✔ | - | - | ✔ | (Experimental, implemented as im2col followed by batched GEMM) +| xCONVGEMM | ✔ | ✔ | - | - | ✔ | (Experimental, implemented as either im2col followed by batched GEMM or as a single kernel) Some less commonly used BLAS routines are not yet supported by CLBlast. They are xROTG, xROTMG, xROT, xROTM, xTBSV, and xTPSV. -- cgit v1.2.3 From 9532f8652c33b7f2d8674a3a848e745202378c7e Mon Sep 17 00:00:00 2001 From: Koichi Akabe Date: Fri, 21 Dec 2018 11:08:01 +0900 Subject: Update changelog --- CHANGELOG | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG b/CHANGELOG index 2241b855..4587940a 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,6 @@ +Development (next version) +- Implemented single-kernel version of convolution as GEMM +- Various minor fixes and enhancements Version 1.5.0 - Added support for shuffle instructions for NVIDIA GPUs (thanks to 'tyler-utah') -- cgit v1.2.3
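
Note on the kernel_flip logic introduced in the first patch: XconvgemmFlip (selected for
KernelMode::kConvolution) mirrors the kernel indices in both spatial dimensions, turning the
cross-correlation computed by XconvgemmNormal into a true convolution. The following standalone C
sketch is illustrative only and not part of any patch above; the helper name im2col_kernel_ids is
invented for this example. It reproduces the kw_id/kh_id selection from xconvgemm_part1.opencl:

#include <assert.h>
#include <stdbool.h>

/* Maps the patch-dimension index 'kwg' to 2D kernel coordinates, optionally flipped,
   mirroring the index math in GlobalToPrivateCheckedImage/GlobalToLocalCheckedImage. */
static void im2col_kernel_ids(int kwg, int kernel_h, int kernel_w, bool kernel_flip,
                              int *kh_id, int *kw_id) {
  const int kernel_2d_index = kwg % (kernel_h * kernel_w);
  *kw_id = kernel_flip ? kernel_w - kernel_2d_index % kernel_w - 1
                       : kernel_2d_index % kernel_w;
  *kh_id = kernel_flip ? kernel_h - kernel_2d_index / kernel_w - 1
                       : kernel_2d_index / kernel_w;
}

int main(void) {
  int kh, kw;
  im2col_kernel_ids(0, 3, 3, false, &kh, &kw);  /* no flip: reads the top-left element */
  assert(kh == 0 && kw == 0);
  im2col_kernel_ids(0, 3, 3, true, &kh, &kw);   /* flipped: reads the bottom-right element */
  assert(kh == 2 && kw == 2);
  return 0;
}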