-rw-r--r--  CHANGELOG                                        |  1
-rw-r--r--  include/internal/routines/level2/xgbmv.h         | 17
-rw-r--r--  include/internal/routines/level2/xgemv.h         | 11
-rw-r--r--  include/internal/routines/level2/xhemv.h         | 17
-rw-r--r--  include/internal/routines/level2/xsymv.h         | 15
-rw-r--r--  src/kernels/level2/xgemv.opencl                  | 80
-rw-r--r--  src/kernels/matrix_transforms/gbgemt.opencl      | 60
-rw-r--r--  src/kernels/matrix_transforms/transforms.opencl  | 40
-rw-r--r--  src/routines/level2/xgbmv.cc                     | 80
-rw-r--r--  src/routines/level2/xgemv.cc                     | 55
-rw-r--r--  src/routines/level2/xhemv.cc                     | 62
-rw-r--r--  src/routines/level2/xsymv.cc                     | 62
12 files changed, 167 insertions(+), 333 deletions(-)
diff --git a/CHANGELOG b/CHANGELOG
index ff0646bd..7571aa5e 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,5 +1,6 @@
Development version (next release)
+- Improved structure and performance of level-2 routines (xSYMV/xHEMV)
- Added level-1 routines:
* SSWAP/DSWAP/CSWAP/ZSWAP
* SSCAL/DSCAL/CSCAL/ZSCAL
diff --git a/include/internal/routines/level2/xgbmv.h b/include/internal/routines/level2/xgbmv.h
index 763168d4..27b033e9 100644
--- a/include/internal/routines/level2/xgbmv.h
+++ b/include/internal/routines/level2/xgbmv.h
@@ -7,10 +7,9 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
-// This file implements the Xgbmv routine. It is based on the generalized matrix multiplication
+// This file implements the Xgbmv routine. It is based on the generalized mat-vec multiplication
// routine (Xgemv). The Xgbmv class inherits from the templated class Xgemv, allowing it to call the
-// "DoGemm" function directly. The "DoGbmv" function first preprocesses the banded matrix by
-// transforming it into a general matrix, and then calls the regular GEMV code.
+// "MatVec" function directly.
//
// =================================================================================================
@@ -27,16 +26,8 @@ template <typename T>
class Xgbmv: public Xgemv<T> {
public:
- // Members and methods from the base class
- using Routine<T>::db_;
- using Routine<T>::context_;
- using Routine<T>::GetProgramFromCache;
- using Routine<T>::TestMatrixA;
- using Routine<T>::RunKernel;
- using Routine<T>::ErrorIn;
-
- // Uses the regular Xgemv routine
- using Xgemv<T>::DoGemv;
+ // Uses the generic matrix-vector routine
+ using Xgemv<T>::MatVec;
// Constructor
Xgbmv(Queue &queue, Event &event, const std::string &name = "GBMV");
diff --git a/include/internal/routines/level2/xgemv.h b/include/internal/routines/level2/xgemv.h
index 1e120a5e..d18f05b4 100644
--- a/include/internal/routines/level2/xgemv.h
+++ b/include/internal/routines/level2/xgemv.h
@@ -47,6 +47,17 @@ class Xgemv: public Routine<T> {
const T beta,
const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc);
+ // Generic version used also for other matrix-vector multiplications
+ StatusCode MatVec(const Layout layout, const Transpose a_transpose,
+ const size_t m, const size_t n,
+ const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
+ const Buffer<T> &x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc,
+ bool fast_kernel, bool fast_kernel_rot, bool reversed,
+ const size_t kl, const size_t ku);
+
private:
// Static variable to get the precision
const static Precision precision_;
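
The header above adds one generic MatVec entry point that every derived level-2 routine forwards to instead of re-implementing the kernel launch. A minimal, self-contained sketch of that forwarding pattern follows; all names are illustrative stand-ins for Xgemv<T>::MatVec and its callers, and the using-declaration is what makes the dependent-base member visible to unqualified calls in the derived templates.

// Minimal sketch of the forwarding pattern (illustrative names only)
#include <cstdio>

template <typename T>
class GemvLike {
 public:
  // Generic matrix-vector entry point; stands in for Xgemv<T>::MatVec
  int mat_vec(int m, int n, bool fast_kernel, bool reversed, int kl, int ku) {
    std::printf("m=%d n=%d fast=%d reversed=%d kl=%d ku=%d\n",
                m, n, int(fast_kernel), int(reversed), kl, ku);
    return 0;  // status code
  }
};

template <typename T>
class SymvLike : public GemvLike<T> {
 public:
  // Without this using-declaration the unqualified call below would not find
  // mat_vec, because GemvLike<T> is a dependent base class.
  using GemvLike<T>::mat_vec;
  int do_symv(int n, bool reversed) {
    // Specialised matrix accesses rule out the fast vectorized kernels
    return mat_vec(n, n, /*fast_kernel=*/false, reversed, /*kl=*/0, /*ku=*/0);
  }
};

int main() { return SymvLike<float>().do_symv(8, true); }
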
diff --git a/include/internal/routines/level2/xhemv.h b/include/internal/routines/level2/xhemv.h
index 311ad9f8..b74db760 100644
--- a/include/internal/routines/level2/xhemv.h
+++ b/include/internal/routines/level2/xhemv.h
@@ -7,10 +7,9 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
-// This file implements the Xhemv routine. It is based on the generalized matrix multiplication
+// This file implements the Xhemv routine. It is based on the generalized mat-vec multiplication
// routine (Xgemv). The Xhemv class inherits from the templated class Xgemv, allowing it to call the
-// "DoGemm" function directly. The "DoHemv" function first preprocesses the hermetian matrix by
-// transforming it into a general matrix, and then calls the regular GEMV code.
+// "MatVec" function directly.
//
// =================================================================================================
@@ -27,16 +26,8 @@ template <typename T>
class Xhemv: public Xgemv<T> {
public:
- // Members and methods from the base class
- using Routine<T>::db_;
- using Routine<T>::context_;
- using Routine<T>::GetProgramFromCache;
- using Routine<T>::TestMatrixA;
- using Routine<T>::RunKernel;
- using Routine<T>::ErrorIn;
-
- // Uses the regular Xgemv routine
- using Xgemv<T>::DoGemv;
+ // Uses the generic matrix-vector routine
+ using Xgemv<T>::MatVec;
// Constructor
Xhemv(Queue &queue, Event &event, const std::string &name = "HEMV");
diff --git a/include/internal/routines/level2/xsymv.h b/include/internal/routines/level2/xsymv.h
index ab6da6d1..c7b92702 100644
--- a/include/internal/routines/level2/xsymv.h
+++ b/include/internal/routines/level2/xsymv.h
@@ -9,8 +9,7 @@
//
// This file implements the Xsymv routine. It is based on the generalized mat-vec multiplication
// routine (Xgemv). The Xsymv class inherits from the templated class Xgemv, allowing it to call the
-// "DoGemm" function directly. The "DoSymv" function first preprocesses the symmetric matrix by
-// transforming it into a general matrix, and then calls the regular GEMV code.
+// "MatVec" function directly.
//
// =================================================================================================
@@ -27,16 +26,8 @@ template <typename T>
class Xsymv: public Xgemv<T> {
public:
- // Members and methods from the base class
- using Routine<T>::db_;
- using Routine<T>::context_;
- using Routine<T>::GetProgramFromCache;
- using Routine<T>::TestMatrixA;
- using Routine<T>::RunKernel;
- using Routine<T>::ErrorIn;
-
- // Uses the regular Xgemv routine
- using Xgemv<T>::DoGemv;
+ // Uses the generic matrix-vector routine
+ using Xgemv<T>::MatVec;
// Constructor
Xsymv(Queue &queue, Event &event, const std::string &name = "SYMV");
diff --git a/src/kernels/level2/xgemv.opencl b/src/kernels/level2/xgemv.opencl
index 1e12dd78..0ecfc960 100644
--- a/src/kernels/level2/xgemv.opencl
+++ b/src/kernels/level2/xgemv.opencl
@@ -79,22 +79,61 @@ R"(
#endif
// =================================================================================================
-// Defines how to load the input matrix in the regular case
-// Loads a scalar input value
+// Defines how to load the input matrix in the non-vectorized case
inline real LoadMatrixA(const __global real* restrict agm, const int x, const int y,
- const int a_ld, const int a_offset) {
- return agm[x + a_ld*y + a_offset];
+ const int a_ld, const int a_offset, const int reversed,
+ const int kl, const int ku) {
+ real result;
+
+ // For symmetric matrices
+ #if defined(ROUTINE_SYMV)
+ if ((reversed == 0 && y <= x) || (reversed == 1 && x <= y)) {
+ result = agm[y*a_ld + x + a_offset];
+ }
+ else {
+ result = agm[x*a_ld + y + a_offset];
+ }
+
+ // For hermitian matrices
+ #elif defined(ROUTINE_HEMV)
+ if ((reversed == 0 && y <= x) || (reversed == 1 && x <= y)) {
+ result = agm[y*a_ld + x + a_offset];
+ if (x == y) { result.y = ZERO; }
+ }
+ else {
+ result = agm[x*a_ld + y + a_offset];
+ COMPLEX_CONJUGATE(result);
+ }
+
+ // For banded matrices
+ #elif defined(ROUTINE_GBMV)
+ const int k = ku-y+x;
+ if (x >= y-ku && x < y+kl+1) {
+ result = agm[a_ld*y + k + a_offset];
+ }
+ else {
+ SetToZero(result);
+ }
+
+ // For general matrices
+ #else
+ result = agm[a_ld*y + x + a_offset];
+ #endif
+
+ return result;
}
+
// Loads a vector input value (1/2)
inline realVF LoadMatrixAVF(const __global realVF* restrict agm, const int x, const int y,
const int a_ld) {
- return agm[x + a_ld*y];
+ return agm[a_ld*y + x];
}
+
// Loads a vector input value (2/2): as before, but different data-type
inline realVFR LoadMatrixAVFR(const __global realVFR* restrict agm, const int x, const int y,
const int a_ld) {
- return agm[x + a_ld*y];
+ return agm[a_ld*y + x];
}
// =================================================================================================
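
The banded branch of LoadMatrixA above maps the logical element A(x, y) onto packed band storage: for column-major data with a_ld >= kl+ku+1, the element lives at agm[a_ld*y + (ku - y + x)] whenever y-ku <= x <= y+kl, and is zero outside the band. A small, standalone host-side check of that mapping (all names illustrative, not library code):

#include <algorithm>
#include <cstdio>
#include <vector>

// Same in-band test and index computation as the kernel's ROUTINE_GBMV branch
double load_banded(const std::vector<double>& band, int a_ld,
                   int kl, int ku, int x, int y) {
  if (x >= y - ku && x < y + kl + 1) {
    return band[a_ld*y + (ku - y + x)];
  }
  return 0.0;  // outside the band
}

int main() {
  const int m = 4, n = 4, kl = 1, ku = 1, a_ld = kl + ku + 1;
  std::vector<double> band(a_ld*n, 0.0);
  // Pack A(i,j) = 10*i + j for the in-band elements, column by column
  for (int j = 0; j < n; ++j)
    for (int i = std::max(0, j - ku); i <= std::min(m - 1, j + kl); ++i)
      band[a_ld*j + (ku + i - j)] = 10*i + j;
  std::printf("%g %g\n", load_banded(band, a_ld, kl, ku, 2, 1),   // in-band A(2,1) -> 21
                         load_banded(band, a_ld, kl, ku, 0, 3));  // out-of-band -> 0
}
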
@@ -106,7 +145,8 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
const __global real* restrict agm, const int a_offset, const int a_ld,
const __global real* restrict xgm, const int x_offset, const int x_inc,
__global real* ygm, const int y_offset, const int y_inc,
- const int do_conjugate) {
+ const int do_conjugate, const int reversed,
+ const int kl, const int ku) {
// Local memory for the vector X
__local real xlm[WGS1];
@@ -141,20 +181,20 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
// The multiply-add function for the main part (divisible by WGS1)
if (a_rotated == 0) { // Not rotated
#pragma unroll
- for (int kl=0; kl<WGS1; ++kl) {
- const int k = kwg + kl;
- real value = LoadMatrixA(agm, gid, k, a_ld, a_offset);
+ for (int kloop=0; kloop<WGS1; ++kloop) {
+ const int k = kwg + kloop;
+ real value = LoadMatrixA(agm, gid, k, a_ld, a_offset, reversed, kl, ku);
if (do_conjugate == 1) { COMPLEX_CONJUGATE(value); }
- MultiplyAdd(acc[w], xlm[kl], value);
+ MultiplyAdd(acc[w], xlm[kloop], value);
}
}
else { // Transposed
#pragma unroll
- for (int kl=0; kl<WGS1; ++kl) {
- const int k = kwg + kl;
- real value = LoadMatrixA(agm, k, gid, a_ld, a_offset);
+ for (int kloop=0; kloop<WGS1; ++kloop) {
+ const int k = kwg + kloop;
+ real value = LoadMatrixA(agm, k, gid, a_ld, a_offset, reversed, kl, ku);
if (do_conjugate == 1) { COMPLEX_CONJUGATE(value); }
- MultiplyAdd(acc[w], xlm[kl], value);
+ MultiplyAdd(acc[w], xlm[kloop], value);
}
}
}
@@ -174,7 +214,7 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
if (a_rotated == 0) { // Not rotated
#pragma unroll
for (int k=n_floor; k<n; ++k) {
- real value = LoadMatrixA(agm, gid, k, a_ld, a_offset);
+ real value = LoadMatrixA(agm, gid, k, a_ld, a_offset, reversed, kl, ku);
if (do_conjugate == 1) { COMPLEX_CONJUGATE(value); }
MultiplyAdd(acc[w], xgm[k*x_inc + x_offset], value);
}
@@ -182,7 +222,7 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
else { // Transposed
#pragma unroll
for (int k=n_floor; k<n; ++k) {
- real value = LoadMatrixA(agm, k, gid, a_ld, a_offset);
+ real value = LoadMatrixA(agm, k, gid, a_ld, a_offset, reversed, kl, ku);
if (do_conjugate == 1) { COMPLEX_CONJUGATE(value); }
MultiplyAdd(acc[w], xgm[k*x_inc + x_offset], value);
}
@@ -209,7 +249,8 @@ __kernel void XgemvFast(const int m, const int n, const real alpha, const real b
const __global realVF* restrict agm, const int a_offset, const int a_ld,
const __global real* restrict xgm, const int x_offset, const int x_inc,
__global real* ygm, const int y_offset, const int y_inc,
- const int do_conjugate) {
+ const int do_conjugate, const int reversed,
+ const int kl, const int ku) {
// Local memory for the vector X
__local real xlm[WGS2];
@@ -305,7 +346,8 @@ __kernel void XgemvFastRot(const int m, const int n, const real alpha, const rea
const __global realVFR* restrict agm, const int a_offset, const int a_ld,
const __global real* restrict xgm, const int x_offset, const int x_inc,
__global real* ygm, const int y_offset, const int y_inc,
- const int do_conjugate) {
+ const int do_conjugate, const int reversed,
+ const int kl, const int ku) {
// Local memory for the vector X
__local real xlm[WGS3];
diff --git a/src/kernels/matrix_transforms/gbgemt.opencl b/src/kernels/matrix_transforms/gbgemt.opencl
deleted file mode 100644
index e46e3a59..00000000
--- a/src/kernels/matrix_transforms/gbgemt.opencl
+++ /dev/null
@@ -1,60 +0,0 @@
-
-// =================================================================================================
-// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
-// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
-// width of 100 characters per line.
-//
-// Author(s):
-// Cedric Nugteren <www.cedricnugteren.nl>
-//
-// This file contains the general banded (gb) to general (ge) matrix transforms.
-//
-// This kernel uses the matrix-transforms common tuning parameters.
-//
-// =================================================================================================
-
-// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
-// literal). Comment-out this line for syntax-highlighting when developing.
-R"(
-
-// =================================================================================================
-#if defined(ROUTINE_GBMV)
-
-// Kernel to transform a general banded matrix into a general matrix
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void GeneralBandedToGeneral(const int src_one, const int src_two,
- const int src_ld, const int src_offset,
- __global const real* restrict src,
- const int dest_one, const int dest_two,
- const int dest_ld, const int dest_offset,
- __global real* dest,
- const int layout,
- const int kl, const int ku) {
-
- // Loops over the work per thread in both dimensions
- #pragma unroll
- for (int w_one=0; w_one<PAD_WPTX; ++w_one) {
- const int id_one = (get_group_id(0)*PAD_WPTX + w_one) * PAD_DIMX + get_local_id(0);
- #pragma unroll
- for (int w_two=0; w_two<PAD_WPTY; ++w_two) {
- const int id_two = (get_group_id(1)*PAD_WPTY + w_two) * PAD_DIMY + get_local_id(1);
- if (id_two < dest_two && id_one < dest_one) {
- real result;
- SetToZero(result);
- const int k = ku - id_two + id_one;
- if ((id_one >= id_two - ku) && (id_one < id_two + kl + 1)) {
- result = src[id_two*src_ld + k + src_offset];
- }
- dest[id_two*dest_ld + id_one + dest_offset] = result;
- }
- }
- }
-}
-
-#endif
-// =================================================================================================
-
-// End of the C++11 raw string literal
-)"
-
-// =================================================================================================
diff --git a/src/kernels/matrix_transforms/transforms.opencl b/src/kernels/matrix_transforms/transforms.opencl
deleted file mode 100644
index 01889a13..00000000
--- a/src/kernels/matrix_transforms/transforms.opencl
+++ /dev/null
@@ -1,40 +0,0 @@
-
-// =================================================================================================
-// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
-// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
-// width of 100 characters per line.
-//
-// Author(s):
-// Cedric Nugteren <www.cedricnugteren.nl>
-//
-// This file contains the common functions and parameters specific for matrix-transform kernels.
-//
-// =================================================================================================
-
-// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
-// literal). Comment-out this line for syntax-highlighting when developing.
-R"(
-
-// =================================================================================================
-
-// Parameters set by the tuner or by the database. Here they are given a basic default value in case
-// this kernel file is used outside of the CLBlast library.
-#ifndef PAD_DIMX
- #define PAD_DIMX 8 // Local workgroup size in the first dimension (x)
-#endif
-#ifndef PAD_DIMY
- #define PAD_DIMY 8 // Local workgroup size in the second dimension (y)
-#endif
-#ifndef PAD_WPTX
- #define PAD_WPTX 1 // Work per thread in the first dimension (x)
-#endif
-#ifndef PAD_WPTY
- #define PAD_WPTY 1 // Work per thread in the second dimension (y)
-#endif
-
-// =================================================================================================
-
-// End of the C++11 raw string literal
-)"
-
-// =================================================================================================
diff --git a/src/routines/level2/xgbmv.cc b/src/routines/level2/xgbmv.cc
index eac208b3..8657c254 100644
--- a/src/routines/level2/xgbmv.cc
+++ b/src/routines/level2/xgbmv.cc
@@ -37,72 +37,22 @@ StatusCode Xgbmv<T>::DoGbmv(const Layout layout, const Transpose a_transpose,
const T beta,
const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc) {
- // Makes sure all dimensions are larger than zero
- if (n == 0 || m == 0) { return StatusCode::kInvalidDimension; }
-
- //
+ // Reverses the upper and lower band count
auto rotated = (layout == Layout::kRowMajor);
- auto t_one = (rotated) ? n : m;
- auto t_two = (rotated) ? m : n;
- auto a_one = kl+ku+1;
- auto a_two = (rotated) ? m : n;
-
- // Checks for validity of the A matrix
- auto status = StatusCode::kSuccess;
- if (a_ld < a_one) { return StatusCode::kInvalidLeadDimA; }
- try {
- auto required_size = (a_ld*a_two + a_offset)*sizeof(T);
- auto buffer_size = a_buffer.GetSize();
- if (buffer_size < required_size) { return StatusCode::kInsufficientMemoryA; }
- } catch (...) { return StatusCode::kInvalidMatrixA; }
-
- // Temporary buffer to generalize the input matrix
- try {
- auto t_buffer = Buffer<T>(context_, t_one*t_two);
-
- // Creates a general matrix from the input to be able to run the regular Xgemv routine
- try {
- auto& program = GetProgramFromCache();
- auto kernel = Kernel(program, "GeneralBandedToGeneral");
-
- // Sets the arguments for the matrix transform kernel
- kernel.SetArgument(0, static_cast<int>(a_one));
- kernel.SetArgument(1, static_cast<int>(a_two));
- kernel.SetArgument(2, static_cast<int>(a_ld));
- kernel.SetArgument(3, static_cast<int>(a_offset));
- kernel.SetArgument(4, a_buffer());
- kernel.SetArgument(5, static_cast<int>(t_one));
- kernel.SetArgument(6, static_cast<int>(t_two));
- kernel.SetArgument(7, static_cast<int>(t_one));
- kernel.SetArgument(8, static_cast<int>(0));
- kernel.SetArgument(9, t_buffer());
- kernel.SetArgument(10, static_cast<int>(layout));
- if (rotated) {
- kernel.SetArgument(11, static_cast<int>(ku));
- kernel.SetArgument(12, static_cast<int>(kl));
- }
- else {
- kernel.SetArgument(11, static_cast<int>(kl));
- kernel.SetArgument(12, static_cast<int>(ku));
- }
-
- // Uses the common matrix-transforms thread configuration
- auto global = std::vector<size_t>{Ceil(CeilDiv(t_one, db_["PAD_WPTX"]), db_["PAD_DIMX"]),
- Ceil(CeilDiv(t_two, db_["PAD_WPTY"]), db_["PAD_DIMY"])};
- auto local = std::vector<size_t>{db_["PAD_DIMX"], db_["PAD_DIMY"]};
- status = RunKernel(kernel, global, local);
- if (ErrorIn(status)) { return status; }
-
- // Runs the regular Xgemv code
- status = DoGemv(layout, a_transpose, m, n, alpha,
- t_buffer, 0, t_one,
- x_buffer, x_offset, x_inc, beta,
- y_buffer, y_offset, y_inc);
-
- // Return the status of the Xgemv routine
- return status;
- } catch (...) { return StatusCode::kInvalidKernel; }
- } catch (...) { return StatusCode::kTempBufferAllocFailure; }
+ auto kl_real = (rotated) ? ku : kl;
+ auto ku_real = (rotated) ? kl : ku;
+
+ // Runs the generic matrix-vector multiplication, disabling the use of fast vectorized kernels.
+  // The specific banded matrix-accesses are implemented in the kernel guarded by the
+ // ROUTINE_GBMV define.
+ bool fast_kernels = false;
+ return MatVec(layout, a_transpose,
+ m, n, alpha,
+ a_buffer, a_offset, a_ld,
+ x_buffer, x_offset, x_inc, beta,
+ y_buffer, y_offset, y_inc,
+ fast_kernels, fast_kernels,
+ false, kl_real, ku_real);
}
// =================================================================================================
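
For reference, DoGbmv now computes the standard banded product y = alpha*A*x + beta*y; the kl/ku swap above accounts for row-major data, which the generic routine addresses as the transpose of a column-major matrix. A plain, standalone reference sketch of the column-major, non-transposed case (unit increments and zero offsets assumed, names illustrative):

#include <algorithm>
#include <vector>

// Reference banded matrix-vector product, same band-storage mapping as LoadMatrixA
void gbmv_reference(int m, int n, int kl, int ku, double alpha, double beta,
                    const std::vector<double>& band, int a_ld,  // a_ld >= kl+ku+1
                    const std::vector<double>& x, std::vector<double>& y) {
  for (int i = 0; i < m; ++i) {
    double sum = 0.0;
    for (int j = std::max(0, i - kl); j <= std::min(n - 1, i + ku); ++j) {
      sum += band[a_ld*j + (ku + i - j)] * x[j];
    }
    y[i] = alpha*sum + beta*y[i];
  }
}
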
diff --git a/src/routines/level2/xgemv.cc b/src/routines/level2/xgemv.cc
index e52d2f20..6e2303c0 100644
--- a/src/routines/level2/xgemv.cc
+++ b/src/routines/level2/xgemv.cc
@@ -32,9 +32,6 @@ template <typename T>
Xgemv<T>::Xgemv(Queue &queue, Event &event, const std::string &name):
Routine<T>(queue, event, name, {"Pad", "Xgemv"}, precision_) {
source_string_ =
- #include "../../kernels/pad.opencl" // TODO: replace
- #include "../../kernels/matrix_transforms/transforms.opencl"
- #include "../../kernels/matrix_transforms/gbgemt.opencl"
#include "../../kernels/level2/xgemv.opencl"
;
}
@@ -51,6 +48,30 @@ StatusCode Xgemv<T>::DoGemv(const Layout layout, const Transpose a_transpose,
const T beta,
const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc) {
+ // Performs the matrix-vector multiplication
+ return MatVec(layout, a_transpose,
+ m, n, alpha,
+ a_buffer, a_offset, a_ld,
+ x_buffer, x_offset, x_inc, beta,
+ y_buffer, y_offset, y_inc,
+ true, true,
+ false, 0, 0); // N/A for this routine
+}
+
+// =================================================================================================
+
+// The generic implementation, also suited for other (non general) matrix-vector multiplications
+template <typename T>
+StatusCode Xgemv<T>::MatVec(const Layout layout, const Transpose a_transpose,
+ const size_t m, const size_t n,
+ const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
+ const Buffer<T> &x_buffer, const size_t x_offset, const size_t x_inc,
+ const T beta,
+ const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc,
+ bool fast_kernel, bool fast_kernel_rot, bool reversed,
+ const size_t kl, const size_t ku) {
+
// Makes sure all dimensions are larger than zero
if (m == 0 || n == 0) { return StatusCode::kInvalidDimension; }
@@ -64,6 +85,11 @@ StatusCode Xgemv<T>::DoGemv(const Layout layout, const Transpose a_transpose,
auto m_real = (a_transposed) ? n : m;
auto n_real = (a_transposed) ? m : n;
+ // Special adjustments for banded matrices
+ if (kl != 0 || ku != 0) {
+ a_one = kl+ku+1;
+ }
+
// Determines whether the kernel needs to perform rotated access ('^' is the XOR operator)
auto a_rotated = a_transposed ^ a_altlayout;
@@ -79,26 +105,26 @@ StatusCode Xgemv<T>::DoGemv(const Layout layout, const Transpose a_transpose,
if (ErrorIn(status)) { return status; }
// Determines whether or not the fast-version can be used
- bool use_fast_kernel = (a_offset == 0) && (a_rotated == 0) && (a_conjugate == 0) &&
- IsMultiple(m, db_["WGS2"]*db_["WPT2"]) &&
- IsMultiple(n, db_["WGS2"]) &&
- IsMultiple(a_ld, db_["VW2"]);
- bool use_fast_kernel_rot = (a_offset == 0) && (a_rotated == 1) && (a_conjugate == 0) &&
- IsMultiple(m, db_["WGS3"]*db_["WPT3"]) &&
- IsMultiple(n, db_["WGS3"]) &&
- IsMultiple(a_ld, db_["VW3"]);
+ fast_kernel = fast_kernel && (a_offset == 0) && (a_rotated == 0) && (a_conjugate == 0) &&
+ IsMultiple(m, db_["WGS2"]*db_["WPT2"]) &&
+ IsMultiple(n, db_["WGS2"]) &&
+ IsMultiple(a_ld, db_["VW2"]);
+ fast_kernel_rot = fast_kernel_rot && (a_offset == 0) && (a_rotated == 1) && (a_conjugate == 0) &&
+ IsMultiple(m, db_["WGS3"]*db_["WPT3"]) &&
+ IsMultiple(n, db_["WGS3"]) &&
+ IsMultiple(a_ld, db_["VW3"]);
// If possible, run the fast-version (rotated or non-rotated) of the kernel
auto kernel_name = "Xgemv";
auto m_ceiled = Ceil(m_real, db_["WGS1"]*db_["WPT1"]);
auto global_size = m_ceiled / db_["WPT1"];
auto local_size = db_["WGS1"];
- if (use_fast_kernel) {
+ if (fast_kernel) {
kernel_name = "XgemvFast";
global_size = m_real / db_["WPT2"];
local_size = db_["WGS2"];
}
- if (use_fast_kernel_rot) {
+ if (fast_kernel_rot) {
kernel_name = "XgemvFastRot";
global_size = m_real / db_["WPT3"];
local_size = db_["WGS3"];
@@ -125,6 +151,9 @@ StatusCode Xgemv<T>::DoGemv(const Layout layout, const Transpose a_transpose,
kernel.SetArgument(12, static_cast<int>(y_offset));
kernel.SetArgument(13, static_cast<int>(y_inc));
kernel.SetArgument(14, static_cast<int>(a_conjugate));
+ kernel.SetArgument(15, static_cast<int>(reversed)); // only used for SYMV/HEMV routines
+ kernel.SetArgument(16, static_cast<int>(kl)); // only used for GBMV routines
+ kernel.SetArgument(17, static_cast<int>(ku)); // only used for GBMV routines
// Launches the kernel
auto global = std::vector<size_t>{global_size};
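
With this change the fast kernels are opt-in: they run only when the caller passes fast_kernel or fast_kernel_rot as true and the problem happens to be aligned with the tuning parameters. A standalone sketch of the non-rotated eligibility test; WGS2, WPT2 and VW2 are database-supplied tuning parameters, and the numeric values below are illustrative assumptions only:

#include <cstddef>

// Mirrors the XgemvFast eligibility test with assumed tuning values (not the real database)
bool fast_kernel_ok(std::size_t m, std::size_t n, std::size_t a_ld, std::size_t a_offset,
                    bool a_rotated, bool a_conjugate, bool requested) {
  const std::size_t WGS2 = 64, WPT2 = 1, VW2 = 2;  // illustrative tuning parameters
  return requested && a_offset == 0 && !a_rotated && !a_conjugate &&
         m % (WGS2*WPT2) == 0 && n % WGS2 == 0 && a_ld % VW2 == 0;
}
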
diff --git a/src/routines/level2/xhemv.cc b/src/routines/level2/xhemv.cc
index 2d92e45f..917bf9b6 100644
--- a/src/routines/level2/xhemv.cc
+++ b/src/routines/level2/xhemv.cc
@@ -37,57 +37,21 @@ StatusCode Xhemv<T>::DoHemv(const Layout layout, const Triangle triangle,
const T beta,
const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc) {
- // Makes sure all dimensions are larger than zero
- if (n == 0) { return StatusCode::kInvalidDimension; }
-
- // Checks for validity of the squared A matrix
- auto status = TestMatrixA(n, n, a_buffer, a_offset, a_ld, sizeof(T));
- if (ErrorIn(status)) { return status; }
-
- // Determines which kernel to run based on the layout (the Xgemv kernel assumes column-major as
- // default) and on whether we are dealing with an upper or lower triangle of the hermitian matrix
- bool is_upper = ((triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
+ // The data is either in the upper or lower triangle
+ bool reversed = ((triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
(triangle == Triangle::kLower && layout == Layout::kRowMajor));
- auto kernel_name = (is_upper) ? "HermUpperToSquared" : "HermLowerToSquared";
-
- // Temporary buffer for a copy of the hermitian matrix
- try {
- auto temp_herm = Buffer<T>(context_, n*n);
-
- // Creates a general matrix from the hermitian matrix to be able to run the regular Xgemv
- // routine afterwards
- try {
- auto& program = GetProgramFromCache();
- auto kernel = Kernel(program, kernel_name);
-
- // Sets the arguments for the hermitian-to-squared kernel
- kernel.SetArgument(0, static_cast<int>(n));
- kernel.SetArgument(1, static_cast<int>(a_ld));
- kernel.SetArgument(2, static_cast<int>(a_offset));
- kernel.SetArgument(3, a_buffer());
- kernel.SetArgument(4, static_cast<int>(n));
- kernel.SetArgument(5, static_cast<int>(n));
- kernel.SetArgument(6, static_cast<int>(0));
- kernel.SetArgument(7, temp_herm());
-
- // Uses the common padding kernel's thread configuration. This is allowed, since the
- // hermitian-to-squared kernel uses the same parameters.
- auto global = std::vector<size_t>{Ceil(CeilDiv(n, db_["PAD_WPTX"]), db_["PAD_DIMX"]),
- Ceil(CeilDiv(n, db_["PAD_WPTY"]), db_["PAD_DIMY"])};
- auto local = std::vector<size_t>{db_["PAD_DIMX"], db_["PAD_DIMY"]};
- status = RunKernel(kernel, global, local);
- if (ErrorIn(status)) { return status; }
-
- // Runs the regular Xgemv code
- status = DoGemv(layout, Transpose::kNo, n, n, alpha,
- temp_herm, 0, n,
- x_buffer, x_offset, x_inc, beta,
- y_buffer, y_offset, y_inc);
- // Return the status of the Xgemv routine
- return status;
- } catch (...) { return StatusCode::kInvalidKernel; }
- } catch (...) { return StatusCode::kTempBufferAllocFailure; }
+ // Runs the generic matrix-vector multiplication, disabling the use of fast vectorized kernels.
+ // The specific hermitian matrix-accesses are implemented in the kernel guarded by the
+ // ROUTINE_HEMV define.
+ bool fast_kernels = false;
+ return MatVec(layout, Transpose::kNo,
+ n, n, alpha,
+ a_buffer, a_offset, a_ld,
+ x_buffer, x_offset, x_inc, beta,
+ y_buffer, y_offset, y_inc,
+ fast_kernels, fast_kernels,
+ reversed, 0, 0);
}
// =================================================================================================
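
The reversed flag folds triangle and layout into a single bit: it is true exactly when the stored triangle is the upper one once the data is addressed column-major (the Xgemv kernel's convention), since a lower triangle stored row-major occupies the same memory locations as an upper triangle stored column-major. A standalone sketch with local stand-ins for the library's Layout and Triangle enums:

// Local stand-ins for clblast::Layout and clblast::Triangle, for illustration only
enum class Layout { kRowMajor, kColMajor };
enum class Triangle { kUpper, kLower };

// True when the stored triangle is the upper one in column-major addressing
bool is_reversed(const Triangle triangle, const Layout layout) {
  return (triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
         (triangle == Triangle::kLower && layout == Layout::kRowMajor);
}
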
diff --git a/src/routines/level2/xsymv.cc b/src/routines/level2/xsymv.cc
index 2ccb51f6..15c91f47 100644
--- a/src/routines/level2/xsymv.cc
+++ b/src/routines/level2/xsymv.cc
@@ -37,57 +37,21 @@ StatusCode Xsymv<T>::DoSymv(const Layout layout, const Triangle triangle,
const T beta,
const Buffer<T> &y_buffer, const size_t y_offset, const size_t y_inc) {
- // Makes sure all dimensions are larger than zero
- if (n == 0) { return StatusCode::kInvalidDimension; }
-
- // Checks for validity of the squared A matrix
- auto status = TestMatrixA(n, n, a_buffer, a_offset, a_ld, sizeof(T));
- if (ErrorIn(status)) { return status; }
-
- // Determines which kernel to run based on the layout (the Xgemv kernel assumes column-major as
- // default) and on whether we are dealing with an upper or lower triangle of the symmetric matrix
- bool is_upper = ((triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
+ // The data is either in the upper or lower triangle
+ bool reversed = ((triangle == Triangle::kUpper && layout != Layout::kRowMajor) ||
(triangle == Triangle::kLower && layout == Layout::kRowMajor));
- auto kernel_name = (is_upper) ? "SymmUpperToSquared" : "SymmLowerToSquared";
-
- // Temporary buffer for a copy of the symmetric matrix
- try {
- auto temp_symm = Buffer<T>(context_, n*n);
-
- // Creates a general matrix from the symmetric matrix to be able to run the regular Xgemv
- // routine afterwards
- try {
- auto& program = GetProgramFromCache();
- auto kernel = Kernel(program, kernel_name);
-
- // Sets the arguments for the symmetric-to-squared kernel
- kernel.SetArgument(0, static_cast<int>(n));
- kernel.SetArgument(1, static_cast<int>(a_ld));
- kernel.SetArgument(2, static_cast<int>(a_offset));
- kernel.SetArgument(3, a_buffer());
- kernel.SetArgument(4, static_cast<int>(n));
- kernel.SetArgument(5, static_cast<int>(n));
- kernel.SetArgument(6, static_cast<int>(0));
- kernel.SetArgument(7, temp_symm());
-
- // Uses the common padding kernel's thread configuration. This is allowed, since the
- // symmetric-to-squared kernel uses the same parameters.
- auto global = std::vector<size_t>{Ceil(CeilDiv(n, db_["PAD_WPTX"]), db_["PAD_DIMX"]),
- Ceil(CeilDiv(n, db_["PAD_WPTY"]), db_["PAD_DIMY"])};
- auto local = std::vector<size_t>{db_["PAD_DIMX"], db_["PAD_DIMY"]};
- status = RunKernel(kernel, global, local);
- if (ErrorIn(status)) { return status; }
-
- // Runs the regular Xgemv code
- status = DoGemv(layout, Transpose::kNo, n, n, alpha,
- temp_symm, 0, n,
- x_buffer, x_offset, x_inc, beta,
- y_buffer, y_offset, y_inc);
- // Return the status of the Xgemv routine
- return status;
- } catch (...) { return StatusCode::kInvalidKernel; }
- } catch (...) { return StatusCode::kTempBufferAllocFailure; }
+ // Runs the generic matrix-vector multiplication, disabling the use of fast vectorized kernels.
+ // The specific symmetric matrix-accesses are implemented in the kernel guarded by the
+ // ROUTINE_SYMV define.
+ bool fast_kernels = false;
+ return MatVec(layout, Transpose::kNo,
+ n, n, alpha,
+ a_buffer, a_offset, a_ld,
+ x_buffer, x_offset, x_inc, beta,
+ y_buffer, y_offset, y_inc,
+ fast_kernels, fast_kernels,
+ reversed, 0, 0);
}
// =================================================================================================