From 93dddda63e4345961a779ee125d748c1eeef4769 Mon Sep 17 00:00:00 2001
From: CNugteren
Date: Fri, 18 Sep 2015 17:46:41 +0200
Subject: Improved the organization and performance of level 2 routines

---
 src/kernels/level2/xgemv.opencl                 | 80 +++++++++++++++++++------
 src/kernels/matrix_transforms/gbgemt.opencl     | 60 -------------------
 src/kernels/matrix_transforms/transforms.opencl | 40 -------------
 src/routines/level2/xgbmv.cc                    | 80 +++++-------------------
 src/routines/level2/xgemv.cc                    | 55 +++++++++++++----
 src/routines/level2/xhemv.cc                    | 62 ++++---------------
 src/routines/level2/xsymv.cc                    | 62 ++++---------------
 7 files changed, 144 insertions(+), 295 deletions(-)
 delete mode 100644 src/kernels/matrix_transforms/gbgemt.opencl
 delete mode 100644 src/kernels/matrix_transforms/transforms.opencl

(limited to 'src')

diff --git a/src/kernels/level2/xgemv.opencl b/src/kernels/level2/xgemv.opencl
index 1e12dd78..0ecfc960 100644
--- a/src/kernels/level2/xgemv.opencl
+++ b/src/kernels/level2/xgemv.opencl
@@ -79,22 +79,61 @@ R"(
 #endif
 
 // =================================================================================================
 
-// Defines how to load the input matrix in the regular case
-// Loads a scalar input value
+// Defines how to load the input matrix in the non-vectorized case
 inline real LoadMatrixA(const __global real* restrict agm, const int x, const int y,
-                        const int a_ld, const int a_offset) {
-  return agm[x + a_ld*y + a_offset];
+                        const int a_ld, const int a_offset, const int reversed,
+                        const int kl, const int ku) {
+  real result;
+
+  // For symmetric matrices
+  #if defined(ROUTINE_SYMV)
+    if ((reversed == 0 && y <= x) || (reversed == 1 && x <= y)) {
+      result = agm[y*a_ld + x + a_offset];
+    }
+    else {
+      result = agm[x*a_ld + y + a_offset];
+    }
+
+  // For hermitian matrices
+  #elif defined(ROUTINE_HEMV)
+    if ((reversed == 0 && y <= x) || (reversed == 1 && x <= y)) {
+      result = agm[y*a_ld + x + a_offset];
+      if (x == y) { result.y = ZERO; }
+    }
+    else {
+      result = agm[x*a_ld + y + a_offset];
+      COMPLEX_CONJUGATE(result);
+    }
+
+  // For banded matrices
+  #elif defined(ROUTINE_GBMV)
+    const int k = ku-y+x;
+    if (x >= y-ku && x < y+kl+1) {
+      result = agm[a_ld*y + k + a_offset];
+    }
+    else {
+      SetToZero(result);
+    }
+
+  // For general matrices
+  #else
+    result = agm[a_ld*y + x + a_offset];
+  #endif
+
+  return result;
 }
+
 // Loads a vector input value (1/2)
 inline realVF LoadMatrixAVF(const __global realVF* restrict agm, const int x, const int y,
                             const int a_ld) {
-  return agm[x + a_ld*y];
+  return agm[a_ld*y + x];
 }
+
 // Loads a vector input value (2/2): as before, but different data-type
 inline realVFR LoadMatrixAVFR(const __global realVFR* restrict agm, const int x, const int y,
                               const int a_ld) {
-  return agm[x + a_ld*y];
+  return agm[a_ld*y + x];
 }
 
 // =================================================================================================
@@ -106,7 +145,8 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
                     const __global real* restrict agm, const int a_offset, const int a_ld,
                     const __global real* restrict xgm, const int x_offset, const int x_inc,
                     __global real* ygm, const int y_offset, const int y_inc,
-                    const int do_conjugate) {
+                    const int do_conjugate, const int reversed,
+                    const int kl, const int ku) {
 
   // Local memory for the vector X
   __local real xlm[WGS1];
@@ -141,20 +181,20 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
 
     // The multiply-add function for the main part (divisable by WGS1)
     if (a_rotated == 0) { // Not rotated
       #pragma unroll
-      for (int kl=0; kl
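The LoadMatrixA changes above fold the routine-specific addressing into the shared GEMV kernel: for SYMV and HEMV the `reversed` flag says which triangle physically holds the data, and the other triangle is read by mirroring the indices (plus a conjugate for HEMV). The following is a minimal host-side C++ sketch, not part of the patch and with illustrative names, that mirrors the symmetric case and the way the host routines further down derive `reversed`; it assumes a column-major n-by-n matrix addressed as `y*ld + x`, like the kernel does.

// Minimal host-side sketch (not part of the patch) of the symmetric access logic
// that LoadMatrixA now performs, plus the way the host routines derive 'reversed'.
// Assumes a column-major n-by-n matrix with leading dimension 'ld'; only one
// triangle holds valid data, selected by 'reversed' exactly as in the kernel:
//   reversed == false -> the lower triangle (y <= x) is stored
//   reversed == true  -> the upper triangle (x <= y) is stored
#include <cassert>
#include <vector>

enum class Triangle { kUpper, kLower };
enum class Layout { kColMajor, kRowMajor };

// Mirrors the 'bool reversed = ...' expressions in xhemv.cc/xsymv.cc below.
bool IsReversed(Triangle triangle, Layout layout) {
  return (triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
         (triangle == Triangle::kLower && layout == Layout::kRowMajor);
}

// Mirrors the ROUTINE_SYMV branch of LoadMatrixA (offsets omitted).
double LoadSymmetric(const std::vector<double>& a, int ld, int x, int y, bool reversed) {
  if ((!reversed && y <= x) || (reversed && x <= y)) { return a[y*ld + x]; }
  return a[x*ld + y];  // mirror across the diagonal
}

int main() {
  const int n = 4;
  std::vector<double> a(n*n, 0.0);
  for (int col = 0; col < n; ++col) {   // fill only the lower triangle
    for (int row = col; row < n; ++row) { a[col*n + row] = 10.0*row + col; }
  }
  const bool reversed = IsReversed(Triangle::kLower, Layout::kColMajor);  // -> false
  for (int x = 0; x < n; ++x) {         // loads behave symmetrically in (x, y)
    for (int y = 0; y < n; ++y) {
      assert(LoadSymmetric(a, n, x, y, reversed) == LoadSymmetric(a, n, y, x, reversed));
    }
  }
  return 0;
}

Running the sketch triggers no assertion, since every mirrored load resolves to the same stored element of the single triangle that was filled.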
diff --git a/src/kernels/matrix_transforms/gbgemt.opencl b/src/kernels/matrix_transforms/gbgemt.opencl
deleted file mode 100644
--- a/src/kernels/matrix_transforms/gbgemt.opencl
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-// This file contains the general banded (gb) to general (ge) matrix transforms.
-//
-// This kernel uses the matrix-transforms common tuning parameters.
-//
-// =================================================================================================
-
-// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
-// literal). Comment-out this line for syntax-highlighting when developing.
-R"(
-
-// =================================================================================================
-#if defined(ROUTINE_GBMV)
-
-// Kernel to transform a general banded matrix into a general matrix
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void GeneralBandedToGeneral(const int src_one, const int src_two,
-                                     const int src_ld, const int src_offset,
-                                     __global const real* restrict src,
-                                     const int dest_one, const int dest_two,
-                                     const int dest_ld, const int dest_offset,
-                                     __global real* dest,
-                                     const int layout,
-                                     const int kl, const int ku) {
-
-  // Loops over the work per thread in both dimensions
-  #pragma unroll
-  for (int w_one=0; w_one
-        if ((id_one >= id_two - ku) && (id_one < id_two + kl + 1)) {
-          result = src[id_two*src_ld + k + src_offset];
-        }
-        dest[id_two*dest_ld + id_one + dest_offset] = result;
-      }
-    }
-  }
-}
-
-#endif
-// =================================================================================================
-
-// End of the C++11 raw string literal
-)"
-
-// =================================================================================================
diff --git a/src/kernels/matrix_transforms/transforms.opencl b/src/kernels/matrix_transforms/transforms.opencl
deleted file mode 100644
index 01889a13..00000000
--- a/src/kernels/matrix_transforms/transforms.opencl
+++ /dev/null
@@ -1,40 +0,0 @@
-
-// =================================================================================================
-// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
-// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
-// width of 100 characters per line.
-//
-// Author(s):
-//   Cedric Nugteren
-//
-// This file contains the common functions and parameters specific for matrix-transform kernels.
-//
-// =================================================================================================
-
-// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
-// literal). Comment-out this line for syntax-highlighting when developing.
-R"(
-
-// =================================================================================================
-
-// Parameters set by the tuner or by the database. Here they are given a basic default value in case
-// this kernel file is used outside of the CLBlast library.
-#ifndef PAD_DIMX
-  #define PAD_DIMX 8      // Local workgroup size in the first dimension (x)
-#endif
-#ifndef PAD_DIMY
-  #define PAD_DIMY 8      // Local workgroup size in the second dimension (y)
-#endif
-#ifndef PAD_WPTX
-  #define PAD_WPTX 1      // Work per thread in the first dimension (x)
-#endif
-#ifndef PAD_WPTY
-  #define PAD_WPTY 1      // Work per thread in the second dimension (y)
-#endif
-
-// =================================================================================================
-
-// End of the C++11 raw string literal
-)"
-
-// =================================================================================================
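For comparison, the deleted GeneralBandedToGeneral kernel above expanded band storage into a dense matrix so that the regular Xgemv kernel could run on it afterwards. A rough host-side C++ equivalent of that expansion is sketched below (assuming column-major storage with kl sub-diagonals, ku super-diagonals and a band leading dimension of at least kl+ku+1; the function name is illustrative, not CLBlast code). After this patch the same `k = ku - y + x` mapping happens directly inside LoadMatrixA under ROUTINE_GBMV, so no temporary dense matrix is needed.

#include <cstddef>
#include <vector>

// Rough host-side equivalent of the deleted GeneralBandedToGeneral kernel:
// expands band storage with kl sub-diagonals and ku super-diagonals
// (column-major, leading dimension src_ld >= kl+ku+1) into a dense
// column-major m-by-n matrix. Illustrative sketch only.
std::vector<double> BandToDense(const std::vector<double>& src, int m, int n,
                                int kl, int ku, int src_ld) {
  std::vector<double> dense(static_cast<std::size_t>(m)*n, 0.0);
  for (int y = 0; y < n; ++y) {      // column index
    for (int x = 0; x < m; ++x) {    // row index
      const int k = ku - y + x;      // row within the band storage
      if (x >= y - ku && x < y + kl + 1) {
        dense[static_cast<std::size_t>(y)*m + x] = src[static_cast<std::size_t>(y)*src_ld + k];
      }                              // outside the band: stays zero
    }
  }
  return dense;
}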
diff --git a/src/routines/level2/xgbmv.cc b/src/routines/level2/xgbmv.cc
index eac208b3..8657c254 100644
--- a/src/routines/level2/xgbmv.cc
+++ b/src/routines/level2/xgbmv.cc
@@ -37,72 +37,22 @@ StatusCode Xgbmv<T>::DoGbmv(const Layout layout, const Transpose a_transpose,
                             const T beta,
                             const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc) {
 
-  // Makes sure all dimensions are larger than zero
-  if (n == 0 || m == 0) { return StatusCode::kInvalidDimension; }
-
-  //
+  // Reverses the upper and lower band count
   auto rotated = (layout == Layout::kRowMajor);
-  auto t_one = (rotated) ? n : m;
-  auto t_two = (rotated) ? m : n;
-  auto a_one = kl+ku+1;
-  auto a_two = (rotated) ? m : n;
-
-  // Checks for validity of the A matrix
-  auto status = StatusCode::kSuccess;
-  if (a_ld < a_one) { return StatusCode::kInvalidLeadDimA; }
-  try {
-    auto required_size = (a_ld*a_two + a_offset)*sizeof(T);
-    auto buffer_size = a_buffer.GetSize();
-    if (buffer_size < required_size) { return StatusCode::kInsufficientMemoryA; }
-  } catch (...) { return StatusCode::kInvalidMatrixA; }
-
-  // Temporary buffer to generalize the input matrix
-  try {
-    auto t_buffer = Buffer<T>(context_, t_one*t_two);
-
-    // Creates a general matrix from the input to be able to run the regular Xgemv routine
-    try {
-      auto& program = GetProgramFromCache();
-      auto kernel = Kernel(program, "GeneralBandedToGeneral");
-
-      // Sets the arguments for the matrix transform kernel
-      kernel.SetArgument(0, static_cast<int>(a_one));
-      kernel.SetArgument(1, static_cast<int>(a_two));
-      kernel.SetArgument(2, static_cast<int>(a_ld));
-      kernel.SetArgument(3, static_cast<int>(a_offset));
-      kernel.SetArgument(4, a_buffer());
-      kernel.SetArgument(5, static_cast<int>(t_one));
-      kernel.SetArgument(6, static_cast<int>(t_two));
-      kernel.SetArgument(7, static_cast<int>(t_one));
-      kernel.SetArgument(8, static_cast<int>(0));
-      kernel.SetArgument(9, t_buffer());
-      kernel.SetArgument(10, static_cast<int>(layout));
-      if (rotated) {
-        kernel.SetArgument(11, static_cast<int>(ku));
-        kernel.SetArgument(12, static_cast<int>(kl));
-      }
-      else {
-        kernel.SetArgument(11, static_cast<int>(kl));
-        kernel.SetArgument(12, static_cast<int>(ku));
-      }
-
-      // Uses the common matrix-transforms thread configuration
-      auto global = std::vector<size_t>{Ceil(CeilDiv(t_one, db_["PAD_WPTX"]), db_["PAD_DIMX"]),
-                                        Ceil(CeilDiv(t_two, db_["PAD_WPTY"]), db_["PAD_DIMY"])};
-      auto local = std::vector<size_t>{db_["PAD_DIMX"], db_["PAD_DIMY"]};
-      status = RunKernel(kernel, global, local);
-      if (ErrorIn(status)) { return status; }
-
-      // Runs the regular Xgemv code
-      status = DoGemv(layout, a_transpose, m, n, alpha,
-                      t_buffer, 0, t_one,
-                      x_buffer, x_offset, x_inc, beta,
-                      y_buffer, y_offset, y_inc);
-
-      // Return the status of the Xgemv routine
-      return status;
-    } catch (...) { return StatusCode::kInvalidKernel; }
-  } catch (...) { return StatusCode::kTempBufferAllocFailure; }
+  auto kl_real = (rotated) ? ku : kl;
+  auto ku_real = (rotated) ? kl : ku;
+
+  // Runs the generic matrix-vector multiplication, disabling the use of fast vectorized kernels.
+  // The specific banded matrix-accesses are implemented in the kernel guarded by the
+  // ROUTINE_GBMV define.
+  bool fast_kernels = false;
+  return MatVec(layout, a_transpose,
+                m, n, alpha,
+                a_buffer, a_offset, a_ld,
+                x_buffer, x_offset, x_inc, beta,
+                y_buffer, y_offset, y_inc,
+                fast_kernels, fast_kernels,
+                false, kl_real, ku_real);
 }
 
 // =================================================================================================
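The kl/ku swap in xgbmv.cc relies on a row-major band matrix being handled as the column-major transpose, and transposition exchanges sub-diagonals with super-diagonals. A one-function C++ sketch of that adjustment (illustrative names, not the CLBlast API):

#include <cstddef>

// Illustrative sketch of the band-count adjustment above: a row-major band
// matrix is processed as its column-major transpose, and transposition
// exchanges sub-diagonals (kl) with super-diagonals (ku).
struct BandCounts { std::size_t kl, ku; };

BandCounts EffectiveBands(bool row_major, std::size_t kl, std::size_t ku) {
  return row_major ? BandCounts{ku, kl} : BandCounts{kl, ku};
}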
diff --git a/src/routines/level2/xgemv.cc b/src/routines/level2/xgemv.cc
index e52d2f20..6e2303c0 100644
--- a/src/routines/level2/xgemv.cc
+++ b/src/routines/level2/xgemv.cc
@@ -32,9 +32,6 @@ template <typename T>
 Xgemv<T>::Xgemv(Queue &queue, Event &event, const std::string &name):
     Routine(queue, event, name, {"Pad", "Xgemv"}, precision_) {
   source_string_ =
-    #include "../../kernels/pad.opencl" // TODO: replace
-    #include "../../kernels/matrix_transforms/transforms.opencl"
-    #include "../../kernels/matrix_transforms/gbgemt.opencl"
     #include "../../kernels/level2/xgemv.opencl"
   ;
 }
@@ -51,6 +48,30 @@ StatusCode Xgemv<T>::DoGemv(const Layout layout, const Transpose a_transpose,
                             const T beta,
                             const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc) {
 
+  // Performs the matrix-vector multiplication
+  return MatVec(layout, a_transpose,
+                m, n, alpha,
+                a_buffer, a_offset, a_ld,
+                x_buffer, x_offset, x_inc, beta,
+                y_buffer, y_offset, y_inc,
+                true, true,
+                false, 0, 0); // N/A for this routine
+}
+
+// =================================================================================================
+
+// The generic implementation, also suited for other (non general) matrix-vector multiplications
+template <typename T>
+StatusCode Xgemv<T>::MatVec(const Layout layout, const Transpose a_transpose,
+                            const size_t m, const size_t n,
+                            const T alpha,
+                            const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
+                            const Buffer<T> &x_buffer, const size_t x_offset, const size_t x_inc,
+                            const T beta,
+                            const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc,
+                            bool fast_kernel, bool fast_kernel_rot, bool reversed,
+                            const size_t kl, const size_t ku) {
+
   // Makes sure all dimensions are larger than zero
   if (m == 0 || n == 0) { return StatusCode::kInvalidDimension; }
 
@@ -64,6 +85,11 @@ StatusCode Xgemv<T>::DoGemv(const Layout layout, const Transpose a_transpose,
   auto m_real = (a_transposed) ? n : m;
   auto n_real = (a_transposed) ? m : n;
 
+  // Special adjustments for banded matrices
+  if (kl != 0 || ku != 0) {
+    a_one = kl+ku+1;
+  }
+
   // Determines whether the kernel needs to perform rotated access ('^' is the XOR operator)
   auto a_rotated = a_transposed ^ a_altlayout;
 
@@ -79,26 +105,26 @@ StatusCode Xgemv<T>::DoGemv(const Layout layout, const Transpose a_transpose,
   if (ErrorIn(status)) { return status; }
 
   // Determines whether or not the fast-version can be used
-  bool use_fast_kernel = (a_offset == 0) && (a_rotated == 0) && (a_conjugate == 0) &&
-                         IsMultiple(m, db_["WGS2"]*db_["WPT2"]) &&
-                         IsMultiple(n, db_["WGS2"]) &&
-                         IsMultiple(a_ld, db_["VW2"]);
-  bool use_fast_kernel_rot = (a_offset == 0) && (a_rotated == 1) && (a_conjugate == 0) &&
-                             IsMultiple(m, db_["WGS3"]*db_["WPT3"]) &&
-                             IsMultiple(n, db_["WGS3"]) &&
-                             IsMultiple(a_ld, db_["VW3"]);
+  fast_kernel = fast_kernel && (a_offset == 0) && (a_rotated == 0) && (a_conjugate == 0) &&
+                IsMultiple(m, db_["WGS2"]*db_["WPT2"]) &&
+                IsMultiple(n, db_["WGS2"]) &&
+                IsMultiple(a_ld, db_["VW2"]);
+  fast_kernel_rot = fast_kernel_rot && (a_offset == 0) && (a_rotated == 1) && (a_conjugate == 0) &&
+                    IsMultiple(m, db_["WGS3"]*db_["WPT3"]) &&
+                    IsMultiple(n, db_["WGS3"]) &&
+                    IsMultiple(a_ld, db_["VW3"]);
 
   // If possible, run the fast-version (rotated or non-rotated) of the kernel
   auto kernel_name = "Xgemv";
   auto m_ceiled = Ceil(m_real, db_["WGS1"]*db_["WPT1"]);
   auto global_size = m_ceiled / db_["WPT1"];
   auto local_size = db_["WGS1"];
-  if (use_fast_kernel) {
+  if (fast_kernel) {
     kernel_name = "XgemvFast";
     global_size = m_real / db_["WPT2"];
     local_size = db_["WGS2"];
   }
-  if (use_fast_kernel_rot) {
+  if (fast_kernel_rot) {
     kernel_name = "XgemvFastRot";
     global_size = m_real / db_["WPT3"];
     local_size = db_["WGS3"];
   }
@@ -125,6 +151,9 @@ StatusCode Xgemv<T>::DoGemv(const Layout layout, const Transpose a_transpose,
     kernel.SetArgument(12, static_cast<int>(y_offset));
     kernel.SetArgument(13, static_cast<int>(y_inc));
     kernel.SetArgument(14, static_cast<int>(a_conjugate));
+    kernel.SetArgument(15, static_cast<int>(reversed)); // only used for SYMV/HEMV routines
+    kernel.SetArgument(16, static_cast<int>(kl)); // only used for GBMV routines
+    kernel.SetArgument(17, static_cast<int>(ku)); // only used for GBMV routines
 
     // Launches the kernel
     auto global = std::vector<size_t>{global_size};
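The conditions under which MatVec still picks the fast vectorized kernels are unchanged; the only difference is that the caller can now veto them up front through fast_kernel and fast_kernel_rot. A condensed C++ sketch of the non-rotated eligibility test, assuming IsMultiple(a, b) is a plain divisibility check and with the tuned parameters passed in explicitly:

#include <cstddef>

// Condensed sketch of the eligibility test for the non-rotated fast kernel,
// assuming IsMultiple(a, b) is a plain divisibility check. WGS2, WPT2 and VW2
// stand for the tuned work-group size, work-per-thread and vector width; the
// caller can also veto the fast path up front (fast_kernel == false).
bool IsMultiple(std::size_t a, std::size_t b) { return b != 0 && a % b == 0; }

bool CanUseFastKernel(bool fast_kernel, std::size_t a_offset, bool a_rotated, bool a_conjugate,
                      std::size_t m, std::size_t n, std::size_t a_ld,
                      std::size_t wgs2, std::size_t wpt2, std::size_t vw2) {
  return fast_kernel && a_offset == 0 && !a_rotated && !a_conjugate &&
         IsMultiple(m, wgs2*wpt2) && IsMultiple(n, wgs2) && IsMultiple(a_ld, vw2);
}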
diff --git a/src/routines/level2/xhemv.cc b/src/routines/level2/xhemv.cc
index 2d92e45f..917bf9b6 100644
--- a/src/routines/level2/xhemv.cc
+++ b/src/routines/level2/xhemv.cc
@@ -37,57 +37,21 @@ StatusCode Xhemv<T>::DoHemv(const Layout layout, const Triangle triangle,
                             const T beta,
                             const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc) {
 
-  // Makes sure all dimensions are larger than zero
-  if (n == 0) { return StatusCode::kInvalidDimension; }
-
-  // Checks for validity of the squared A matrix
-  auto status = TestMatrixA(n, n, a_buffer, a_offset, a_ld, sizeof(T));
-  if (ErrorIn(status)) { return status; }
-
-  // Determines which kernel to run based on the layout (the Xgemv kernel assumes column-major as
-  // default) and on whether we are dealing with an upper or lower triangle of the hermitian matrix
-  bool is_upper = ((triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
+  // The data is either in the upper or lower triangle
+  bool reversed = ((triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
                    (triangle == Triangle::kLower && layout == Layout::kRowMajor));
-  auto kernel_name = (is_upper) ? "HermUpperToSquared" : "HermLowerToSquared";
-
-  // Temporary buffer for a copy of the hermitian matrix
-  try {
-    auto temp_herm = Buffer<T>(context_, n*n);
-
-    // Creates a general matrix from the hermitian matrix to be able to run the regular Xgemv
-    // routine afterwards
-    try {
-      auto& program = GetProgramFromCache();
-      auto kernel = Kernel(program, kernel_name);
-
-      // Sets the arguments for the hermitian-to-squared kernel
-      kernel.SetArgument(0, static_cast<int>(n));
-      kernel.SetArgument(1, static_cast<int>(a_ld));
-      kernel.SetArgument(2, static_cast<int>(a_offset));
-      kernel.SetArgument(3, a_buffer());
-      kernel.SetArgument(4, static_cast<int>(n));
-      kernel.SetArgument(5, static_cast<int>(n));
-      kernel.SetArgument(6, static_cast<int>(0));
-      kernel.SetArgument(7, temp_herm());
-
-      // Uses the common padding kernel's thread configuration. This is allowed, since the
-      // hermitian-to-squared kernel uses the same parameters.
-      auto global = std::vector<size_t>{Ceil(CeilDiv(n, db_["PAD_WPTX"]), db_["PAD_DIMX"]),
-                                        Ceil(CeilDiv(n, db_["PAD_WPTY"]), db_["PAD_DIMY"])};
-      auto local = std::vector<size_t>{db_["PAD_DIMX"], db_["PAD_DIMY"]};
-      status = RunKernel(kernel, global, local);
-      if (ErrorIn(status)) { return status; }
-
-      // Runs the regular Xgemv code
-      status = DoGemv(layout, Transpose::kNo, n, n, alpha,
-                      temp_herm, 0, n,
-                      x_buffer, x_offset, x_inc, beta,
-                      y_buffer, y_offset, y_inc);
-      // Return the status of the Xgemv routine
-      return status;
-    } catch (...) { return StatusCode::kInvalidKernel; }
-  } catch (...) { return StatusCode::kTempBufferAllocFailure; }
+
+  // Runs the generic matrix-vector multiplication, disabling the use of fast vectorized kernels.
+  // The specific hermitian matrix-accesses are implemented in the kernel guarded by the
+  // ROUTINE_HEMV define.
+  bool fast_kernels = false;
+  return MatVec(layout, Transpose::kNo,
+                n, n, alpha,
+                a_buffer, a_offset, a_ld,
+                x_buffer, x_offset, x_inc, beta,
+                y_buffer, y_offset, y_inc,
+                fast_kernels, fast_kernels,
+                reversed, 0, 0);
 }
 
 // =================================================================================================
diff --git a/src/routines/level2/xsymv.cc b/src/routines/level2/xsymv.cc
index 2ccb51f6..15c91f47 100644
--- a/src/routines/level2/xsymv.cc
+++ b/src/routines/level2/xsymv.cc
@@ -37,57 +37,21 @@ StatusCode Xsymv<T>::DoSymv(const Layout layout, const Triangle triangle,
                             const T beta,
                             const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc) {
 
-  // Makes sure all dimensions are larger than zero
-  if (n == 0) { return StatusCode::kInvalidDimension; }
-
-  // Checks for validity of the squared A matrix
-  auto status = TestMatrixA(n, n, a_buffer, a_offset, a_ld, sizeof(T));
-  if (ErrorIn(status)) { return status; }
-
-  // Determines which kernel to run based on the layout (the Xgemv kernel assumes column-major as
-  // default) and on whether we are dealing with an upper or lower triangle of the symmetric matrix
-  bool is_upper = ((triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
+  // The data is either in the upper or lower triangle
+  bool reversed = ((triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
                    (triangle == Triangle::kLower && layout == Layout::kRowMajor));
-  auto kernel_name = (is_upper) ? "SymmUpperToSquared" : "SymmLowerToSquared";
-
-  // Temporary buffer for a copy of the symmetric matrix
-  try {
-    auto temp_symm = Buffer<T>(context_, n*n);
-
-    // Creates a general matrix from the symmetric matrix to be able to run the regular Xgemv
-    // routine afterwards
-    try {
-      auto& program = GetProgramFromCache();
-      auto kernel = Kernel(program, kernel_name);
-
-      // Sets the arguments for the symmetric-to-squared kernel
-      kernel.SetArgument(0, static_cast<int>(n));
-      kernel.SetArgument(1, static_cast<int>(a_ld));
-      kernel.SetArgument(2, static_cast<int>(a_offset));
-      kernel.SetArgument(3, a_buffer());
-      kernel.SetArgument(4, static_cast<int>(n));
-      kernel.SetArgument(5, static_cast<int>(n));
-      kernel.SetArgument(6, static_cast<int>(0));
-      kernel.SetArgument(7, temp_symm());
-
-      // Uses the common padding kernel's thread configuration. This is allowed, since the
-      // symmetric-to-squared kernel uses the same parameters.
-      auto global = std::vector<size_t>{Ceil(CeilDiv(n, db_["PAD_WPTX"]), db_["PAD_DIMX"]),
-                                        Ceil(CeilDiv(n, db_["PAD_WPTY"]), db_["PAD_DIMY"])};
-      auto local = std::vector<size_t>{db_["PAD_DIMX"], db_["PAD_DIMY"]};
-      status = RunKernel(kernel, global, local);
-      if (ErrorIn(status)) { return status; }
-
-      // Runs the regular Xgemv code
-      status = DoGemv(layout, Transpose::kNo, n, n, alpha,
-                      temp_symm, 0, n,
-                      x_buffer, x_offset, x_inc, beta,
-                      y_buffer, y_offset, y_inc);
-      // Return the status of the Xgemv routine
-      return status;
-    } catch (...) { return StatusCode::kInvalidKernel; }
-  } catch (...) { return StatusCode::kTempBufferAllocFailure; }
+
+  // Runs the generic matrix-vector multiplication, disabling the use of fast vectorized kernels.
+  // The specific symmetric matrix-accesses are implemented in the kernel guarded by the
+  // ROUTINE_SYMV define.
+  bool fast_kernels = false;
+  return MatVec(layout, Transpose::kNo,
+                n, n, alpha,
+                a_buffer, a_offset, a_ld,
+                x_buffer, x_offset, x_inc, beta,
+                y_buffer, y_offset, y_inc,
+                fast_kernels, fast_kernels,
+                reversed, 0, 0);
 }
 
 // =================================================================================================
--
cgit v1.2.3