From 9fb2c61b256ccf66b6a7b6f605008125288d60cf Mon Sep 17 00:00:00 2001
From: Cedric Nugteren
Date: Sun, 7 Jan 2018 14:27:15 +0100
Subject: Added API and tests for new GemmStridedBatched routine

---
 src/routines/levelx/xgemmstridedbatched.cpp | 297 ++++++++++++++++++++++++++++
 src/routines/levelx/xgemmstridedbatched.hpp |  66 +++++++
 src/routines/routines.hpp                   |   1 +
 3 files changed, 364 insertions(+)
 create mode 100644 src/routines/levelx/xgemmstridedbatched.cpp
 create mode 100644 src/routines/levelx/xgemmstridedbatched.hpp

(limited to 'src/routines')

diff --git a/src/routines/levelx/xgemmstridedbatched.cpp b/src/routines/levelx/xgemmstridedbatched.cpp
new file mode 100644
index 00000000..3ea52980
--- /dev/null
+++ b/src/routines/levelx/xgemmstridedbatched.cpp
@@ -0,0 +1,297 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+//   Cedric Nugteren
+//
+// This file implements the XgemmStridedBatched class (see the header for information about the class).
+//
+// =================================================================================================
+
+#include "routines/levelx/xgemmstridedbatched.hpp"
+#include "routines/level3/xgemm.hpp"
+
+#include <string>
+#include <vector>
+
+namespace clblast {
+// =================================================================================================
+
+// Constructor: forwards to base class constructor
+template <typename T>
+XgemmStridedBatched<T>::XgemmStridedBatched(Queue &queue, EventPointer event, const std::string &name):
+    Routine(queue, event, name, {"Copy","Pad","Transpose","Padtranspose","Xgemm","XgemmDirect","GemmRoutine"},
+            PrecisionValue<T>(), {}, {
+    #include "../../kernels/level3/level3.opencl"
+    #include "../../kernels/level3/copy_fast.opencl"
+    #include "../../kernels/level3/copy_pad.opencl"
+    #include "../../kernels/level3/transpose_fast.opencl"
+    #include "../../kernels/level3/transpose_pad.opencl"
+    , // separated in multiple parts to prevent C1091 in MSVC 2013
+    #include "../../kernels/level3/xgemm_direct_part1.opencl"
+    #include "../../kernels/level3/xgemm_direct_part2.opencl"
+    #include "../../kernels/level3/xgemm_direct_part3.opencl"
+    , // separated in multiple parts to prevent C1091 in MSVC 2013
+    #include "../../kernels/level3/xgemm_part1.opencl"
+    #include "../../kernels/level3/xgemm_part2.opencl"
+    #include "../../kernels/level3/xgemm_part3.opencl"
+    #include "../../kernels/level3/xgemm_part4.opencl"
+    , // separated in multiple parts to prevent C1091 in MSVC 2013
+    #include "../../kernels/level3/xgemm_batched.opencl"
+    #include "../../kernels/level3/xgemm_direct_batched.opencl"
+    }) {
+}
+
+// =================================================================================================
+
+// The main routine
+template <typename T>
+void XgemmStridedBatched<T>::DoGemmStridedBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose,
+                                                  const size_t m, const size_t n, const size_t k, const T alpha,
+                                                  const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+                                                  const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+                                                  const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+                                                  const size_t batch_count) {
+
+  // Tests for a valid batch count
+  if (batch_count < 1) {
+    throw BLASError(StatusCode::kInvalidBatchCount);
+  }
+
+  // Computes the transpose/conjugate options and sets the a/b/c sizes based on that
+  bool a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate;
+  size_t a_one, a_two, b_one, b_two, c_one, c_two;
+  Xgemm<T>::ProcessArguments(layout, a_transpose, b_transpose, m, n, k,
+                             a_one, a_two, b_one, b_two, c_one, c_two,
+                             a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate);
+
+  // Tests the matrices for validity
+  for (auto batch = size_t{0}; batch < batch_count; ++batch) {
+    TestMatrixA(a_one, a_two, a_buffer, a_offset + a_stride * batch, a_ld);
+    TestMatrixB(b_one, b_two, b_buffer, b_offset + b_stride * batch, b_ld);
+    TestMatrixC(c_one, c_two, c_buffer, c_offset + c_stride * batch, c_ld);
+  }
+
+  // Selects which version of the batched GEMM to run
+  const auto do_gemm_direct = true;
+  if (do_gemm_direct) { // single generic kernel
+    BatchedGemmDirect(m, n, k, alpha,
+                      a_buffer, a_offset, a_ld, a_stride,
+                      b_buffer, b_offset, b_ld, b_stride, beta,
+                      c_buffer, c_offset, c_ld, c_stride,
+                      a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate,
+                      batch_count);
+  }
+  else { // pre/post-processing plus a very fast kernel
+    BatchedGemmIndirect(m, n, k, alpha,
+                        a_buffer, a_offset, a_ld, a_stride,
+                        b_buffer, b_offset, b_ld, b_stride, beta,
+                        c_buffer, c_offset, c_ld, c_stride,
+                        a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate,
+                        a_one, a_two, b_one, b_two, c_one, c_two, batch_count);
+  }
+}
+
+
+// =================================================================================================
+
+// The indirect version of batched GEMM. This uses the faster but non-general kernel. It has specific
+// requirements, but several pre and post-processing kernels take care of those. However, the
+// overhead of these extra kernels might not be ideal for certain devices/arguments.
+template <typename T>
+void XgemmStridedBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const size_t k, const T alpha,
+                                                 const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+                                                 const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+                                                 const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+                                                 const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+                                                 const bool a_conjugate, const bool b_conjugate,
+                                                 const size_t a_one, const size_t a_two,
+                                                 const size_t b_one, const size_t b_two,
+                                                 const size_t c_one, const size_t c_two,
+                                                 const size_t batch_count) {
+  // Calculates the ceiled versions of m, n, and k
+  const auto m_ceiled = Ceil(Ceil(m, db_["MWG"]), db_["VWM"]);
+  const auto n_ceiled = Ceil(Ceil(n, db_["NWG"]), db_["VWN"]);
+  const auto k_ceiled = Ceil(Ceil(k, db_["KWG"]), db_["VWM"]);
+
+  // Computes the first and second "internal" (ceiled) dimensions of the 3 matrices taking into account
+  // whether the matrices need to be rotated or not for the kernel.
+  size_t a_one_i, a_two_i, b_one_i, b_two_i, c_one_i, c_two_i;
+  Xgemm<T>::CalculateInternalDimensions(m, n, k, db_["MWG"], db_["NWG"], db_["KWG"],
+                                        a_one_i, a_two_i, b_one_i, b_two_i, c_one_i, c_two_i);
+
+  /* TODO
+  // Sets the "internal" offsets, i.e. the perfect offsets
+  auto a_offsets_i = 0;//std::vector<int>(batch_count);
+  auto b_offsets_i = 0;//std::vector<int>(batch_count);
+  auto c_offsets_i = 0;//std::vector<int>(batch_count);
+
+  // Determines whether or not temporary matrices are needed
+  auto a_no_temp = a_one == a_one_i && a_two == a_two_i && a_ld == a_one && a_offsets == a_offsets_i &&
+                   !a_do_transpose && !a_conjugate;
+  auto b_no_temp = b_one == b_one_i && b_two == b_two_i && b_ld == b_one && b_offsets == b_offsets_i &&
+                   !b_do_transpose && !b_conjugate;
+  auto c_no_temp = c_one == c_one_i && c_two == c_two_i && c_ld == c_one && c_offsets == c_offsets_i &&
+                   !c_do_transpose;
+
+  // Creates the temporary matrices
+  const auto a_temp = (a_no_temp) ? a_buffer : Buffer<T>(context_, batch_count * a_one_i * a_two_i);
+  const auto b_temp = (b_no_temp) ? b_buffer : Buffer<T>(context_, batch_count * b_one_i * b_two_i);
+  const auto c_temp = (c_no_temp) ? c_buffer : Buffer<T>(context_, batch_count * c_one_i * c_two_i);
+
+  // Events of all kernels (including pre/post processing kernels)
+  auto eventWaitList = std::vector<Event>();
+  auto emptyEventList = std::vector<Event>();
+
+  // Runs the pre-processing kernel for matrix A. This transposes the matrix, but also pads zeros
+  // to fill it up until it reaches a certain multiple of size (kernel parameter dependent). In
+  // case nothing has to be done, these kernels can be skipped.
+  if (!a_no_temp) {
+    auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+    auto a_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+    a_offsets_device.Write(queue_, batch_count, a_offsets);
+    a_offsets_i_device.Write(queue_, batch_count, a_offsets_i);
+    auto eventProcessA = Event();
+    PadCopyTransposeMatrixBatched(queue_, device_, db_, eventProcessA.pointer(), emptyEventList,
+                                  a_one, a_two, a_ld, a_offsets_device, a_buffer,
+                                  a_one_i, a_two_i, a_one_i, a_offsets_i_device, a_temp,
+                                  program_, true, a_do_transpose, a_conjugate, batch_count);
+    eventWaitList.push_back(eventProcessA);
+  }
+
+  // As above, but now for matrix B
+  if (!b_no_temp) {
+    auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+    auto b_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+    b_offsets_device.Write(queue_, batch_count, b_offsets);
+    b_offsets_i_device.Write(queue_, batch_count, b_offsets_i);
+    auto eventProcessB = Event();
+    PadCopyTransposeMatrixBatched(queue_, device_, db_, eventProcessB.pointer(), emptyEventList,
+                                  b_one, b_two, b_ld, b_offsets_device, b_buffer,
+                                  b_one_i, b_two_i, b_one_i, b_offsets_i_device, b_temp,
+                                  program_, true, b_do_transpose, b_conjugate, batch_count);
+    eventWaitList.push_back(eventProcessB);
+  }
+
+  // As above, but now for matrix C
+  auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+  auto c_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadWrite, batch_count);
+  if (!c_no_temp) {
+    c_offsets_device.Write(queue_, batch_count, c_offsets);
+    c_offsets_i_device.Write(queue_, batch_count, c_offsets_i);
+    auto eventProcessC = Event();
+    PadCopyTransposeMatrixBatched(queue_, device_, db_, eventProcessC.pointer(), emptyEventList,
+                                  c_one, c_two, c_ld, c_offsets_device, c_buffer,
+                                  c_one_i, c_two_i, c_one_i, c_offsets_i_device, c_temp,
+                                  program_, true, c_do_transpose, false, batch_count);
+    eventWaitList.push_back(eventProcessC);
+  }
+
+  // Retrieves the Xgemm kernel from the compiled binary
+  auto kernel = Kernel(program_, "XgemmStridedBatched");
+
+  // Sets the kernel arguments
+  kernel.SetArgument(0, static_cast<int>(m_ceiled));
+  kernel.SetArgument(1, static_cast<int>(n_ceiled));
+  kernel.SetArgument(2, static_cast<int>(k_ceiled));
+  kernel.SetArgument(3, alpha);
+  kernel.SetArgument(4, beta);
+  kernel.SetArgument(5, a_temp());
+  kernel.SetArgument(6, static_cast<int>(a_one_i));
+  kernel.SetArgument(7, static_cast<int>(a_two_i));
+  kernel.SetArgument(8, b_temp());
+  kernel.SetArgument(9, static_cast<int>(b_one_i));
+  kernel.SetArgument(10, static_cast<int>(b_two_i));
+  kernel.SetArgument(11, c_temp());
+  kernel.SetArgument(12, static_cast<int>(c_one_i));
+  kernel.SetArgument(13, static_cast<int>(c_two_i));
+
+  // Computes the global and local thread sizes
+  const auto global = std::vector<size_t>{
+      (c_one_i * db_["MDIMC"]) / db_["MWG"],
+      (c_two_i * db_["NDIMC"]) / db_["NWG"],
+      batch_count
+  };
+  const auto local = std::vector<size_t>{db_["MDIMC"], db_["NDIMC"], 1};
+
+  // Launches the kernel
+  auto eventKernel = Event();
+  auto eventPointer = eventKernel.pointer();
+  RunKernel(kernel, queue_, device_, global, local, eventPointer, eventWaitList);
+
+  // Runs the post-processing kernel if needed
+  if (!c_no_temp) {
+    eventWaitList.push_back(eventKernel);
+    PadCopyTransposeMatrixBatched(queue_, device_, db_, event_, eventWaitList,
+                                  c_one_i, c_two_i, c_one_i, c_offsets_i_device, c_temp,
+                                  c_one, c_two, c_ld, c_offsets_device, c_buffer,
+                                  program_, false, c_do_transpose, false, batch_count);
+  }
+  */
+}
+
+// =================================================================================================
+
+// The direct version of batched GEMM, requiring just one kernel, no pre or post-processing kernels.
+template <typename T>
+void XgemmStridedBatched<T>::BatchedGemmDirect(const size_t m, const size_t n, const size_t k, const T alpha,
+                                               const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride,
+                                               const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta,
+                                               const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride,
+                                               const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+                                               const bool a_conjugate, const bool b_conjugate,
+                                               const size_t batch_count) {
+/* TODO
+  // Retrieves the proper XgemmDirect kernel from the compiled binary
+  const auto name = (a_do_transpose) ? (b_do_transpose ? "XgemmDirectBatchedTT" : "XgemmDirectBatchedTN") :
"XgemmDirectBatchedNT" : "XgemmDirectBatchedNN"); + auto kernel = Kernel(program_, name); + + // Sets the kernel arguments + kernel.SetArgument(0, static_cast(m)); + kernel.SetArgument(1, static_cast(n)); + kernel.SetArgument(2, static_cast(k)); + kernel.SetArgument(3, alpha); + kernel.SetArgument(4, beta); + kernel.SetArgument(5, a_buffer()); + kernel.SetArgument(6, a_offset); + kernel.SetArgument(7, static_cast(a_ld)); + kernel.SetArgument(8, b_buffer()); + kernel.SetArgument(9, b_offset); + kernel.SetArgument(10, static_cast(b_ld)); + kernel.SetArgument(11, c_buffer()); + kernel.SetArgument(12, c_offset); + kernel.SetArgument(13, static_cast(c_ld)); + kernel.SetArgument(14, static_cast(c_do_transpose)); + kernel.SetArgument(15, static_cast(a_conjugate)); + kernel.SetArgument(16, static_cast(b_conjugate)); + + // Computes the global and local thread sizes + const auto m_ceiled = Ceil(m, db_["WGD"]); + const auto n_ceiled = Ceil(n, db_["WGD"]); + const auto global = std::vector{ + (m_ceiled * db_["MDIMCD"]) / db_["WGD"], + (n_ceiled * db_["NDIMCD"]) / db_["WGD"], + batch_count + }; + const auto local = std::vector{db_["MDIMCD"], db_["NDIMCD"], 1}; + + // Launches the kernel + RunKernel(kernel, queue_, device_, global, local, event_); + */ +} + +// ================================================================================================= + +// Compiles the templated class +template class XgemmStridedBatched; +template class XgemmStridedBatched; +template class XgemmStridedBatched; +template class XgemmStridedBatched; +template class XgemmStridedBatched; + +// ================================================================================================= +} // namespace clblast diff --git a/src/routines/levelx/xgemmstridedbatched.hpp b/src/routines/levelx/xgemmstridedbatched.hpp new file mode 100644 index 00000000..0dbbcb10 --- /dev/null +++ b/src/routines/levelx/xgemmstridedbatched.hpp @@ -0,0 +1,66 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file implements the XgemmStridedBatched routine. This is a non-blas batched version of GEMM. 
+// +// ================================================================================================= + +#ifndef CLBLAST_ROUTINES_XGEMMSTRIDEDBATCHED_H_ +#define CLBLAST_ROUTINES_XGEMMSTRIDEDBATCHED_H_ + +#include + +#include "routine.hpp" + +namespace clblast { +// ================================================================================================= + +// See comment at top of file for a description of the class +template +class XgemmStridedBatched: public Routine { +public: + + // Constructor + XgemmStridedBatched(Queue &queue, EventPointer event, const std::string &name = "GEMMSTRIDEDBATCHED"); + + // Templated-precision implementation of the routine + void DoGemmStridedBatched(const Layout layout, const Transpose a_transpose, const Transpose b_transpose, + const size_t m, const size_t n, const size_t k, const T alpha, + const Buffer &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride, + const Buffer &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta, + const Buffer &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride, + const size_t batch_count); + + // Indirect version of strided batched GEMM (with pre and post-processing kernels) + void BatchedGemmIndirect(const size_t m, const size_t n, const size_t k, const T alpha, + const Buffer &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride, + const Buffer &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta, + const Buffer &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride, + const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose, + const bool a_conjugate, const bool b_conjugate, + const size_t a_one, const size_t a_two, + const size_t b_one, const size_t b_two, + const size_t c_one, const size_t c_two, + const size_t batch_count); + + // Direct version of strided batched GEMM (no pre and post-processing kernels) + void BatchedGemmDirect(const size_t m, const size_t n, const size_t k, const T alpha, + const Buffer &a_buffer, const size_t a_offset, const size_t a_ld, const size_t a_stride, + const Buffer &b_buffer, const size_t b_offset, const size_t b_ld, const size_t b_stride, const T beta, + const Buffer &c_buffer, const size_t c_offset, const size_t c_ld, const size_t c_stride, + const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose, + const bool a_conjugate, const bool b_conjugate, + const size_t batch_count); +}; + +// ================================================================================================= +} // namespace clblast + +// CLBLAST_ROUTINES_XGEMMSTRIDEDBATCHED_H_ +#endif diff --git a/src/routines/routines.hpp b/src/routines/routines.hpp index 9e7768b9..0aeff707 100644 --- a/src/routines/routines.hpp +++ b/src/routines/routines.hpp @@ -71,6 +71,7 @@ #include "routines/levelx/xim2col.hpp" #include "routines/levelx/xaxpybatched.hpp" #include "routines/levelx/xgemmbatched.hpp" +#include "routines/levelx/xgemmstridedbatched.hpp" // CLBLAST_ROUTINES_ROUTINES_H_ #endif -- cgit v1.2.3