From 85c1db93221bf9d71083c6725a33ccbcd1b61de4 Mon Sep 17 00:00:00 2001 From: CNugteren Date: Wed, 10 Jun 2015 08:44:30 +0200 Subject: Added initial naive version of Xgemv kernel --- include/internal/tuning.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/internal/tuning.h b/include/internal/tuning.h index 7768888c..6ddf4b3a 100644 --- a/include/internal/tuning.h +++ b/include/internal/tuning.h @@ -38,6 +38,10 @@ using Tuner3 = std::function&, template void TunerXY(int argc, char* argv[], const Tuner2 &tune_function); +// Tuner for matrix-vector-vector input +template +void TunerAXY(int argc, char* argv[], const Tuner3 &tune_function); + // Tuner for matrix-matrix input template void TunerAB(int argc, char* argv[], const Tuner2 &tune_function); -- cgit v1.2.3 From e522d1a74e6a877f32730da4807f54cf9a996679 Mon Sep 17 00:00:00 2001 From: CNugteren Date: Sat, 13 Jun 2015 11:01:20 +0200 Subject: Added initial version of GEMV including tester and performance client --- CMakeLists.txt | 2 +- include/clblast.h | 17 +++- include/internal/database.h | 1 + include/internal/database/xgemv.h | 129 ++++++++++++++++++++++++++++++ include/internal/routines/xgemv.h | 46 +++++++++++ src/clblast.cc | 155 ++++++++++++++++++++++--------------- src/database.cc | 2 + src/kernels/xgemv.opencl | 14 +++- src/routines/xgemv.cc | 117 ++++++++++++++++++++++++++++ src/tuning/xgemv.cc | 3 + test/correctness/routines/xgemv.cc | 94 ++++++++++++++++++++++ test/correctness/testaxy.cc | 26 ++++--- test/performance/client.cc | 94 +++++++++++++++++++++- test/performance/client.h | 6 +- test/performance/routines/xgemv.cc | 107 +++++++++++++++++++++++++ test/wrapper_clblas.h | 58 ++++++++++++++ 16 files changed, 783 insertions(+), 88 deletions(-) create mode 100644 include/internal/database/xgemv.h create mode 100644 include/internal/routines/xgemv.h create mode 100644 src/routines/xgemv.cc create mode 100644 test/correctness/routines/xgemv.cc create mode 100644 
test/performance/routines/xgemv.cc (limited to 'include') diff --git a/CMakeLists.txt b/CMakeLists.txt index b6697ac3..6a597f22 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -96,7 +96,7 @@ include_directories(${clblast_SOURCE_DIR}/include ${OPENCL_INCLUDE_DIRS}) set(KERNELS copy pad transpose padtranspose xaxpy xgemv xgemm) set(SAMPLE_PROGRAMS sgemm) set(ROUTINES_XY xaxpy) -set(ROUTINES_AXY ) +set(ROUTINES_AXY xgemv) set(ROUTINES_ABC xgemm xsymm) set(ROUTINES ${ROUTINES_XY} ${ROUTINES_AXY} ${ROUTINES_ABC}) diff --git a/include/clblast.h b/include/clblast.h index 4c3c5201..231348b8 100644 --- a/include/clblast.h +++ b/include/clblast.h @@ -85,7 +85,7 @@ enum class Precision { kHalf = 16, kSingle = 32, kDouble = 64, // Templated-precision vector-times-constant plus vector: SAXPY/DAXPY/CAXPY/ZAXPY template -StatusCode Axpy(const size_t m, const T alpha, +StatusCode Axpy(const size_t n, const T alpha, const cl_mem x_buffer, const size_t x_offset, const size_t x_inc, cl_mem y_buffer, const size_t y_offset, const size_t y_inc, cl_command_queue* queue, cl_event* event); @@ -93,10 +93,21 @@ StatusCode Axpy(const size_t m, const T alpha, // ================================================================================================= // BLAS level-2 (matrix-vector) routines +// Templated-precision generalized matrix-vector multiplication: SGEMV/DGEMV/CGEMV/ZGEMV +template +StatusCode Gemv(const Layout layout, const Transpose transpose_a, + const size_t m, const size_t n, + const T alpha, + const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, + const cl_mem x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + cl_mem y_buffer, const size_t y_offset, const size_t y_inc, + cl_command_queue* queue, cl_event* event); + // ================================================================================================= // BLAS level-3 (matrix-matrix) routines -// Templated-precision generalized matrix multiplication: SGEMM/DGEMM +// 
Templated-precision generalized matrix-matrix multiplication: SGEMM/DGEMM template StatusCode Gemm(const Layout layout, const Transpose transpose_a, const Transpose transpose_b, const size_t m, const size_t n, const size_t k, @@ -107,7 +118,7 @@ StatusCode Gemm(const Layout layout, const Transpose transpose_a, const Transpos cl_mem c_buffer, const size_t c_offset, const size_t c_ld, cl_command_queue* queue, cl_event* event); -// Templated-precision symmetric matrix multiplication: SSYMM/DSYMM +// Templated-precision symmetric matrix-matrix multiplication: SSYMM/DSYMM template StatusCode Symm(const Layout layout, const Side side, const Triangle triangle, const size_t m, const size_t n, diff --git a/include/internal/database.h b/include/internal/database.h index dbbdd5c0..33ad1979 100644 --- a/include/internal/database.h +++ b/include/internal/database.h @@ -54,6 +54,7 @@ class Database { // The database consists of separate database entries, stored together in a vector static const DatabaseEntry XaxpySingle, XaxpyDouble, XaxpyComplexSingle, XaxpyComplexDouble; + static const DatabaseEntry XgemvSingle, XgemvDouble, XgemvComplexSingle, XgemvComplexDouble; static const DatabaseEntry XgemmSingle, XgemmDouble, XgemmComplexSingle, XgemmComplexDouble; static const DatabaseEntry CopySingle, CopyDouble, CopyComplexSingle, CopyComplexDouble; static const DatabaseEntry PadSingle, PadDouble, PadComplexSingle, PadComplexDouble; diff --git a/include/internal/database/xgemv.h b/include/internal/database/xgemv.h new file mode 100644 index 00000000..178d1122 --- /dev/null +++ b/include/internal/database/xgemv.h @@ -0,0 +1,129 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. 
+// +// Author(s): +// Cedric Nugteren +// +// This file populates the database with best-found tuning parameters for the Xgemv kernels. +// +// ================================================================================================= + +namespace clblast { +// ================================================================================================= + +const Database::DatabaseEntry Database::XgemvSingle = { + "Xgemv", Precision::kSingle, { + { // NVIDIA GPUs + CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { + { "GeForce GTX 480", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tesla K20m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tesla K40m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // AMD GPUs + CL_DEVICE_TYPE_GPU, "AMD", { + { "Tahiti", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // Intel GPUs + CL_DEVICE_TYPE_GPU, "Intel", { + { "Iris", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // Default + CL_DEVICE_TYPE_ALL, kDefault, { + { kDefault, { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + } +}; + +// ================================================================================================= + +const Database::DatabaseEntry Database::XgemvDouble = { + "Xgemv", Precision::kDouble, { + { // NVIDIA GPUs + CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { + { "GeForce GTX 480", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tesla K20m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tesla K40m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // AMD GPUs + CL_DEVICE_TYPE_GPU, "AMD", { + { "Tahiti", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // Intel GPUs + CL_DEVICE_TYPE_GPU, "Intel", { + } + }, + { // Default + CL_DEVICE_TYPE_ALL, kDefault, { + { kDefault, { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + } +}; +// ================================================================================================= + +const Database::DatabaseEntry Database::XgemvComplexSingle = { + "Xgemv", Precision::kComplexSingle, { + { // 
NVIDIA GPUs + CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { + { "GeForce GTX 480", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tesla K20m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tesla K40m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // AMD GPUs + CL_DEVICE_TYPE_GPU, "AMD", { + { "Tahiti", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // Intel GPUs + CL_DEVICE_TYPE_GPU, "Intel", { + { "Iris", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // Default + CL_DEVICE_TYPE_ALL, kDefault, { + { kDefault, { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + } +}; + +// ================================================================================================= + +const Database::DatabaseEntry Database::XgemvComplexDouble = { + "Xgemv", Precision::kComplexDouble, { + { // NVIDIA GPUs + CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { + { "GeForce GTX 480", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tesla K20m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tesla K40m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // AMD GPUs + CL_DEVICE_TYPE_GPU, "AMD", { + { "Tahiti", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + { // Intel GPUs + CL_DEVICE_TYPE_GPU, "Intel", { + } + }, + { // Default + CL_DEVICE_TYPE_ALL, kDefault, { + { kDefault, { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + } + }, + } +}; + +// ================================================================================================= +} // namespace clblast diff --git a/include/internal/routines/xgemv.h b/include/internal/routines/xgemv.h new file mode 100644 index 00000000..a3109036 --- /dev/null +++ b/include/internal/routines/xgemv.h @@ -0,0 +1,46 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. 
+// +// Author(s): +// Cedric Nugteren +// +// This file implements the Xgemv routine. The precision is implemented using a template argument. +// +// ================================================================================================= + +#ifndef CLBLAST_ROUTINES_XGEMV_H_ +#define CLBLAST_ROUTINES_XGEMV_H_ + +#include "internal/routine.h" + +namespace clblast { +// ================================================================================================= + +// See comment at top of file for a description of the class +template +class Xgemv: public Routine { + public: + Xgemv(CommandQueue &queue, Event &event); + + // Templated-precision implementation of the routine + StatusCode DoGemv(const Layout layout, const Transpose a_transpose, + const size_t m, const size_t n, + const T alpha, + const Buffer &a_buffer, const size_t a_offset, const size_t a_ld, + const Buffer &x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + const Buffer &y_buffer, const size_t y_offset, const size_t y_inc); + + private: + // Static variable to get the precision + const static Precision precision_; +}; + +// ================================================================================================= +} // namespace clblast + +// CLBLAST_ROUTINES_XGEMV_H_ +#endif diff --git a/src/clblast.cc b/src/clblast.cc index 72de3b24..1d4d0621 100644 --- a/src/clblast.cc +++ b/src/clblast.cc @@ -20,6 +20,9 @@ // BLAS level-1 includes #include "internal/routines/xaxpy.h" +// BLAS level-2 includes +#include "internal/routines/xgemv.h" + // BLAS level-3 includes #include "internal/routines/xgemm.h" #include "internal/routines/xsymm.h" @@ -36,18 +39,18 @@ StatusCode Axpy(const size_t n, const T alpha, cl_command_queue* queue, cl_event* event) { auto queue_cpp = CommandQueue(*queue); auto event_cpp = Event(*event); - auto xaxpy = Xaxpy(queue_cpp, event_cpp); + auto routine = Xaxpy(queue_cpp, event_cpp); // Loads the kernel source-code as an include (C++11 raw 
string literal) std::string kernel_source = #include "kernels/xaxpy.opencl" - auto status = xaxpy.SetUp(kernel_source); + auto status = routine.SetUp(kernel_source); if (status != StatusCode::kSuccess) { return status; } // Runs the routine - return xaxpy.DoAxpy(n, alpha, - Buffer(x_buffer), x_offset, x_inc, - Buffer(y_buffer), y_offset, y_inc); + return routine.DoAxpy(n, alpha, + Buffer(x_buffer), x_offset, x_inc, + Buffer(y_buffer), y_offset, y_inc); } template StatusCode Axpy(const size_t, const float, const cl_mem, const size_t, const size_t, @@ -69,22 +72,70 @@ template StatusCode Axpy(const size_t, const double2, // ================================================================================================= // BLAS level-2 (matrix-vector) routines +// GEMV +template +StatusCode Gemv(const Layout layout, const Transpose transpose_a, + const size_t m, const size_t n, const T alpha, + const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, + const cl_mem x_buffer, const size_t x_offset, const size_t x_inc, const T beta, + cl_mem y_buffer, const size_t y_offset, const size_t y_inc, + cl_command_queue* queue, cl_event* event) { + + auto queue_cpp = CommandQueue(*queue); + auto event_cpp = Event(*event); + auto routine = Xgemv(queue_cpp, event_cpp); + + // Loads the kernel source-code as an include (C++11 raw string literal) + std::string kernel_source = + #include "kernels/xgemv.opencl" + auto status = routine.SetUp(kernel_source); + if (status != StatusCode::kSuccess) { return status; } + + // Runs the routine + return routine.DoGemv(layout, transpose_a, m, n, alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(x_buffer), x_offset, x_inc, beta, + Buffer(y_buffer), y_offset, y_inc); +} +template StatusCode Gemv(const Layout, const Transpose, + const size_t, const size_t, const float, + const cl_mem, const size_t, const size_t, + const cl_mem, const size_t, const size_t, const float, + cl_mem, const size_t, const size_t, + cl_command_queue*, 
cl_event*); +template StatusCode Gemv(const Layout, const Transpose, + const size_t, const size_t, const double, + const cl_mem, const size_t, const size_t, + const cl_mem, const size_t, const size_t, const double, + cl_mem, const size_t, const size_t, + cl_command_queue*, cl_event*); +template StatusCode Gemv(const Layout, const Transpose, + const size_t, const size_t, const float2, + const cl_mem, const size_t, const size_t, + const cl_mem, const size_t, const size_t, const float2, + cl_mem, const size_t, const size_t, + cl_command_queue*, cl_event*); +template StatusCode Gemv(const Layout, const Transpose, + const size_t, const size_t, const double2, + const cl_mem, const size_t, const size_t, + const cl_mem, const size_t, const size_t, const double2, + cl_mem, const size_t, const size_t, + cl_command_queue*, cl_event*); + // ================================================================================================= // BLAS level-3 (matrix-matrix) routines // GEMM template StatusCode Gemm(const Layout layout, const Transpose transpose_a, const Transpose transpose_b, - const size_t m, const size_t n, const size_t k, - const T alpha, + const size_t m, const size_t n, const size_t k, const T alpha, const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, - const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, - const T beta, + const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, const T beta, cl_mem c_buffer, const size_t c_offset, const size_t c_ld, cl_command_queue* queue, cl_event* event) { auto queue_cpp = CommandQueue(*queue); auto event_cpp = Event(*event); - auto xgemm = Xgemm(queue_cpp, event_cpp); + auto routine = Xgemm(queue_cpp, event_cpp); // Loads the kernel source-code as an include (C++11 raw string literal) std::string common_source1 = @@ -97,50 +148,39 @@ StatusCode Gemm(const Layout layout, const Transpose transpose_a, const Transpos #include "kernels/padtranspose.opencl" std::string kernel_source = #include 
"kernels/xgemm.opencl" - auto status = xgemm.SetUp(common_source1 + common_source2 + common_source3 + common_source4 + - kernel_source); + auto status = routine.SetUp(common_source1 + common_source2 + common_source3 + common_source4 + + kernel_source); if (status != StatusCode::kSuccess) { return status; } // Runs the routine - return xgemm.DoGemm(layout, transpose_a, transpose_b, - m, n, k, - alpha, - Buffer(a_buffer), a_offset, a_ld, - Buffer(b_buffer), b_offset, b_ld, - beta, - Buffer(c_buffer), c_offset, c_ld); + return routine.DoGemm(layout, transpose_a, transpose_b, m, n, k, alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld, beta, + Buffer(c_buffer), c_offset, c_ld); } template StatusCode Gemm(const Layout, const Transpose, const Transpose, - const size_t, const size_t, const size_t, - const float, - const cl_mem, const size_t, const size_t, + const size_t, const size_t, const size_t, const float, const cl_mem, const size_t, const size_t, - const float, + const cl_mem, const size_t, const size_t, const float, cl_mem, const size_t, const size_t, cl_command_queue*, cl_event*); template StatusCode Gemm(const Layout, const Transpose, const Transpose, - const size_t, const size_t, const size_t, - const double, - const cl_mem, const size_t, const size_t, + const size_t, const size_t, const size_t, const double, const cl_mem, const size_t, const size_t, - const double, + const cl_mem, const size_t, const size_t, const double, cl_mem, const size_t, const size_t, cl_command_queue*, cl_event*); /* template StatusCode Gemm(const Layout, const Transpose, const Transpose, - const size_t, const size_t, const size_t, - const float2, + const size_t, const size_t, const size_t, const float2, const cl_mem, const size_t, const size_t, - const cl_mem, const size_t, const size_t, - const float2, + const cl_mem, const size_t, const size_t, const float2, cl_mem, const size_t, const size_t, cl_command_queue*, cl_event*); template StatusCode Gemm(const 
Layout, const Transpose, const Transpose, - const size_t, const size_t, const size_t, - const double2, - const cl_mem, const size_t, const size_t, + const size_t, const size_t, const size_t, const double2, const cl_mem, const size_t, const size_t, - const double2, + const cl_mem, const size_t, const size_t, const double2, cl_mem, const size_t, const size_t, cl_command_queue*, cl_event*); */ @@ -150,16 +190,14 @@ template StatusCode Gemm(const Layout, const Transpose, const Transpose // SYMM template StatusCode Symm(const Layout layout, const Side side, const Triangle triangle, - const size_t m, const size_t n, - const T alpha, + const size_t m, const size_t n, const T alpha, const cl_mem a_buffer, const size_t a_offset, const size_t a_ld, - const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, - const T beta, + const cl_mem b_buffer, const size_t b_offset, const size_t b_ld, const T beta, cl_mem c_buffer, const size_t c_offset, const size_t c_ld, cl_command_queue* queue, cl_event* event) { auto queue_cpp = CommandQueue(*queue); auto event_cpp = Event(*event); - auto xsymm = Xsymm(queue_cpp, event_cpp); + auto routine = Xsymm(queue_cpp, event_cpp); // Loads the kernel source-code as an include (C++11 raw string literal) std::string common_source1 = @@ -172,50 +210,39 @@ StatusCode Symm(const Layout layout, const Side side, const Triangle triangle, #include "kernels/padtranspose.opencl" std::string kernel_source = #include "kernels/xgemm.opencl" - auto status = xsymm.SetUp(common_source1 + common_source2 + common_source3 + common_source4 + + auto status = routine.SetUp(common_source1 + common_source2 + common_source3 + common_source4 + kernel_source); if (status != StatusCode::kSuccess) { return status; } // Runs the routine - return xsymm.DoSymm(layout, side, triangle, - m, n, - alpha, - Buffer(a_buffer), a_offset, a_ld, - Buffer(b_buffer), b_offset, b_ld, - beta, - Buffer(c_buffer), c_offset, c_ld); + return routine.DoSymm(layout, side, triangle, m, n, 
alpha, + Buffer(a_buffer), a_offset, a_ld, + Buffer(b_buffer), b_offset, b_ld, beta, + Buffer(c_buffer), c_offset, c_ld); } template StatusCode Symm(const Layout, const Side, const Triangle, - const size_t, const size_t, - const float, + const size_t, const size_t, const float, const cl_mem, const size_t, const size_t, - const cl_mem, const size_t, const size_t, - const float, + const cl_mem, const size_t, const size_t, const float, cl_mem, const size_t, const size_t, cl_command_queue*, cl_event*); template StatusCode Symm(const Layout, const Side, const Triangle, - const size_t, const size_t, - const double, - const cl_mem, const size_t, const size_t, + const size_t, const size_t, const double, const cl_mem, const size_t, const size_t, - const double, + const cl_mem, const size_t, const size_t, const double, cl_mem, const size_t, const size_t, cl_command_queue*, cl_event*); /* template StatusCode Symm(const Layout, const Side, const Triangle, - const size_t, const size_t, - const float2, + const size_t, const size_t, const float2, const cl_mem, const size_t, const size_t, - const cl_mem, const size_t, const size_t, - const float2, + const cl_mem, const size_t, const size_t, const float2, cl_mem, const size_t, const size_t, cl_command_queue*, cl_event*); template StatusCode Symm(const Layout, const Side, const Triangle, - const size_t, const size_t, - const double2, - const cl_mem, const size_t, const size_t, + const size_t, const size_t, const double2, const cl_mem, const size_t, const size_t, - const double2, + const cl_mem, const size_t, const size_t, const double2, cl_mem, const size_t, const size_t, cl_command_queue*, cl_event*); */ diff --git a/src/database.cc b/src/database.cc index beaa122b..4d9d844e 100644 --- a/src/database.cc +++ b/src/database.cc @@ -13,6 +13,7 @@ #include "internal/database.h" #include "internal/database/xaxpy.h" +#include "internal/database/xgemv.h" #include "internal/database/xgemm.h" #include "internal/database/copy.h" #include 
"internal/database/pad.h" @@ -27,6 +28,7 @@ namespace clblast { // Initializes the database const std::vector Database::database = { XaxpySingle, XaxpyDouble, XaxpyComplexSingle, XaxpyComplexDouble, + XgemvSingle, XgemvDouble, XgemvComplexSingle, XgemvComplexDouble, XgemmSingle, XgemmDouble, XgemmComplexSingle, XgemmComplexDouble, CopySingle, CopyDouble, CopyComplexSingle, CopyComplexDouble, PadSingle, PadDouble, PadComplexSingle, PadComplexDouble, diff --git a/src/kernels/xgemv.opencl b/src/kernels/xgemv.opencl index c90bc26e..1d2ab435 100644 --- a/src/kernels/xgemv.opencl +++ b/src/kernels/xgemv.opencl @@ -34,7 +34,8 @@ R"( // The gemv kernel __attribute__((reqd_work_group_size(WGS, 1, 1))) __kernel void Xgemv(const int m, const int n, const real alpha, const real beta, - const __global real* restrict agm, + const int a_transposed, + const __global real* restrict agm, const int a_offset, const int a_ld, const __global real* restrict xgm, const int x_offset, const int x_inc, __global real* ygm, const int y_offset, const int y_inc) { @@ -45,8 +46,15 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta, // Loop over the elements of the matrix A real acc; SetToZero(acc); - for (int k=0; k +// +// This file implements the Xgemv class (see the header for information about the class). 
+// +// ================================================================================================= + +#include "internal/routines/xgemv.h" + +#include +#include + +namespace clblast { +// ================================================================================================= + +// Specific implementations to get the memory-type based on a template argument +template <> const Precision Xgemv::precision_ = Precision::kSingle; +template <> const Precision Xgemv::precision_ = Precision::kDouble; +template <> const Precision Xgemv::precision_ = Precision::kComplexSingle; +template <> const Precision Xgemv::precision_ = Precision::kComplexDouble; + +// ================================================================================================= + +// Constructor: forwards to base class constructor +template +Xgemv::Xgemv(CommandQueue &queue, Event &event): + Routine(queue, event, {"Xgemv"}, precision_) { +} + +// ================================================================================================= + +// The main routine +template +StatusCode Xgemv::DoGemv(const Layout layout, const Transpose a_transpose, + const size_t m, const size_t n, + const T alpha, + const Buffer &a_buffer, const size_t a_offset, const size_t a_ld, + const Buffer &x_buffer, const size_t x_offset, const size_t x_inc, + const T beta, + const Buffer &y_buffer, const size_t y_offset, const size_t y_inc) { + + // Makes sure all dimensions are larger than zero + if (m == 0 || n == 0) { return StatusCode::kInvalidDimension; } + + // Computes whether or not the matrix has an alternative layout (row or column-major). + auto a_altlayout = (layout == Layout::kRowMajor); + auto a_one = (a_altlayout) ? n : m; + auto a_two = (a_altlayout) ? m : n; + + // Swap m and n if the matrix is transposed + auto a_transposed = (a_transpose == Transpose::kYes); + auto m_real = (a_transposed) ? n : m; + auto n_real = (a_transposed) ? 
m : n; + + // Determines whether the kernel needs to perform rotated access ('^' is the XOR operator) + auto a_rotated = a_transposed ^ a_altlayout; + + // Tests the matrix and the vectors for validity + auto status = TestMatrixA(a_one, a_two, a_buffer, a_offset, a_ld, sizeof(T)); + if (ErrorIn(status)) { return status; } + status = TestVectorX(n_real, x_buffer, x_offset, x_inc, sizeof(T)); + if (ErrorIn(status)) { return status; } + status = TestVectorY(m_real, y_buffer, y_offset, y_inc, sizeof(T)); + if (ErrorIn(status)) { return status; } + + // Retrieves the Xgemv kernel from the compiled binary + try { + auto program = GetProgramFromCache(); + auto kernel = Kernel(program, "Xgemv"); + + // Sets the kernel arguments + kernel.SetArgument(0, static_cast(m_real)); + kernel.SetArgument(1, static_cast(n_real)); + kernel.SetArgument(2, alpha); + kernel.SetArgument(3, beta); + kernel.SetArgument(4, static_cast(a_rotated)); + kernel.SetArgument(5, a_buffer()); + kernel.SetArgument(6, static_cast(a_offset)); + kernel.SetArgument(7, static_cast(a_ld)); + kernel.SetArgument(8, x_buffer()); + kernel.SetArgument(9, static_cast(x_offset)); + kernel.SetArgument(10, static_cast(x_inc)); + kernel.SetArgument(11, y_buffer()); + kernel.SetArgument(12, static_cast(y_offset)); + kernel.SetArgument(13, static_cast(y_inc)); + + // Launches the kernel + auto m_ceiled = Ceil(m_real, db_["WGS"]); + auto global = std::vector{CeilDiv(m_ceiled, db_["WPT"])}; + auto local = std::vector{db_["WGS"]}; + status = RunKernel(kernel, global, local); + if (ErrorIn(status)) { return status; } + + // Waits for all kernels to finish + queue_.Finish(); + + // Successfully finished the computation + return StatusCode::kSuccess; + } catch (...)
{ return StatusCode::kInvalidKernel; } +} + +// ================================================================================================= + +// Compiles the templated class +template class Xgemv; +template class Xgemv; +template class Xgemv; +template class Xgemv; + +// ================================================================================================= +} // namespace clblast diff --git a/src/tuning/xgemv.cc b/src/tuning/xgemv.cc index 1ee7c7bf..74bb77a5 100644 --- a/src/tuning/xgemv.cc +++ b/src/tuning/xgemv.cc @@ -55,7 +55,10 @@ void XgemvTune(const Arguments &args, tuner.AddArgumentScalar(static_cast(args.n)); tuner.AddArgumentScalar(args.alpha); tuner.AddArgumentScalar(args.beta); + tuner.AddArgumentScalar(0); tuner.AddArgumentInput(a_mat); + tuner.AddArgumentScalar(0); + tuner.AddArgumentScalar(static_cast(args.n)); tuner.AddArgumentInput(x_vec); tuner.AddArgumentScalar(0); tuner.AddArgumentScalar(1); diff --git a/test/correctness/routines/xgemv.cc b/test/correctness/routines/xgemv.cc new file mode 100644 index 00000000..94ae147f --- /dev/null +++ b/test/correctness/routines/xgemv.cc @@ -0,0 +1,94 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under the MIT license. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file implements the tests for the Xgemv routine. It is based on the TestAXY class. 
+// +// ================================================================================================= + +#include "wrapper_clblas.h" +#include "correctness/testaxy.h" + +namespace clblast { +// ================================================================================================= + +// The correctness tester, containing the function calls to CLBlast and to clBLAS for comparison. +template +void XgemvTest(int argc, char *argv[], const bool silent, const std::string &name) { + + // Creates the CLBlast lambda + auto clblast_lambda = [](const Arguments &args, + const Buffer &a_mat, const Buffer &x_vec, const Buffer &y_vec, + CommandQueue &queue) -> StatusCode { + auto queue_plain = queue(); + auto event = cl_event{}; + return Gemv(args.layout, args.a_transpose, args.m, args.n, args.alpha, + a_mat(), args.a_offset, args.a_ld, + x_vec(), args.x_offset, args.x_inc, args.beta, + y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + }; + + // Creates the clBLAS lambda (for comparison) + auto clblas_lambda = [](const Arguments &args, + const Buffer &a_mat, const Buffer &x_vec, const Buffer &y_vec, + CommandQueue &queue) -> StatusCode { + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = clblasXgemv(static_cast(args.layout), + static_cast(args.a_transpose), + args.m, args.n, args.alpha, + a_mat(), args.a_offset, args.a_ld, + x_vec(), args.x_offset, args.x_inc, args.beta, + y_vec(), args.y_offset, args.y_inc, + 1, &queue_plain, 0, nullptr, &event); + return static_cast(status); + }; + + // Selects the platform and device on which to test (command-line options) + auto help = std::string{"Options given/available:\n"}; + const auto platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0}); + const auto device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0}); + if (!silent) { fprintf(stdout, "\n* %s\n", help.c_str()); } + + // Initializes the other arguments relevant for this routine + auto args = Arguments{}; + 
const auto options = std::vector{kArgM, kArgN, kArgLayout, kArgATransp, + kArgALeadDim, kArgXInc, kArgYInc, + kArgAOffset, kArgXOffset, kArgYOffset}; + + // Creates a tester + TestAXY tester{platform_id, device_id, name, options, clblast_lambda, clblas_lambda}; + + // Loops over the test-cases from a data-layout point of view + for (auto &layout: {Layout::kRowMajor, Layout::kColMajor}) { + args.layout = layout; + for (auto &a_transpose: {Transpose::kNo, Transpose::kYes}) { + args.a_transpose = a_transpose; + const auto case_name = ToString(layout)+" "+ToString(a_transpose); + + // Runs the tests + tester.TestRegular(args, case_name); + tester.TestInvalidBufferSizes(args, case_name); + } + } +} + +// ================================================================================================= +} // namespace clblast + +// Main function (not within the clblast namespace) +int main(int argc, char *argv[]) { + clblast::XgemvTest(argc, argv, false, "SGEMV"); + //clblast::XgemvTest(argc, argv, true, "DGEMV"); + //clblast::XgemvTest(argc, argv, true, "CGEMV"); + //clblast::XgemvTest(argc, argv, true, "ZGEMV"); + return 0; +} + +// ================================================================================================= diff --git a/test/correctness/testaxy.cc b/test/correctness/testaxy.cc index 1e01a0e8..ed0b06ab 100644 --- a/test/correctness/testaxy.cc +++ b/test/correctness/testaxy.cc @@ -49,12 +49,6 @@ template void TestAXY::TestRegular(Arguments &args, const std::string &name) { TestStart("regular behaviour", name); - // Computes whether or not the matrix is transposed. Note that we assume a default of - // column-major and no-transpose. If one of them is different (but not both), then rotated - // is considered true. 
- auto a_rotated = (args.layout == Layout::kColMajor && args.a_transpose != Transpose::kNo) || - (args.layout == Layout::kRowMajor && args.a_transpose == Transpose::kNo); - // Iterates over the dimension for the matrix and vectors for (auto &m: kMatrixVectorDims) { args.m = m; @@ -62,7 +56,12 @@ void TestAXY::TestRegular(Arguments &args, const std::string &name) { args.n = n; // Computes the second dimension of the matrix taking the rotation into account - auto a_two = (a_rotated) ? m : n; + auto a_two = (args.layout == Layout::kRowMajor) ? args.m : args.n; + + // Computes the vector sizes in case the matrix is transposed + auto a_transposed = (args.a_transpose == Transpose::kYes); + auto m_real = (a_transposed) ? n : m; + auto n_real = (a_transposed) ? m : n; // Iterates over the leading-dimension values and the offsets of the matrix for (auto &a_ld: kMatrixVectorDims) { @@ -82,8 +81,8 @@ void TestAXY::TestRegular(Arguments &args, const std::string &name) { // Computes the buffer sizes auto a_size = a_two * a_ld + a_offset; - auto x_size = n * x_inc + x_offset; - auto y_size = n * y_inc + y_offset; + auto x_size = n_real * x_inc + x_offset; + auto y_size = m_real * y_inc + y_offset; if (a_size < 1 || x_size < 1 || y_size < 1) { continue; } // Creates the OpenCL buffers @@ -124,15 +123,15 @@ void TestAXY::TestRegular(Arguments &args, const std::string &name) { // Checks for differences in the output auto errors = size_t{0}; - for (auto idn=size_t{0}; idn::TestInvalidBufferSizes(Arguments &args, const std::string &n args.m = kBufferSize; args.n = kBufferSize; args.a_ld = kBufferSize; + args.a_offset = 0; + args.x_offset = 0; + args.y_offset = 0; // Iterates over test buffer sizes const std::vector kMatrixSizes = {0, kBufferSize*kBufferSize-1, kBufferSize*kBufferSize}; diff --git a/test/performance/client.cc b/test/performance/client.cc index ddaea0e1..3b07970c 100644 --- a/test/performance/client.cc +++ b/test/performance/client.cc @@ -26,8 +26,12 @@ template void 
ClientXY(int argc, char *argv[], Routine2 client_routine, const std::vector &options) { + // Function to determine how to find the default value of the leading dimension of matrix A. + // Note: this is not relevant for this client but given anyway. + auto default_ld_a = [](const Arguments args) { return args.n; }; + // Simple command line argument parser with defaults - auto args = ParseArguments(argc, argv, options); + auto args = ParseArguments(argc, argv, options, default_ld_a); if (args.print_help) { return; } // Prints the header of the output table @@ -81,13 +85,94 @@ template void ClientXY(int, char **, Routine2, const std::vect // ================================================================================================= +// This is the matrix-vector-vector variant of the set-up/tear-down client routine. +template +void ClientAXY(int argc, char *argv[], Routine3 client_routine, + const std::vector &options) { + + // Function to determine how to find the default value of the leading dimension of matrix A + auto default_ld_a = [](const Arguments args) { return args.n; }; + + // Simple command line argument parser with defaults + auto args = ParseArguments(argc, argv, options, default_ld_a); + if (args.print_help) { return; } + + // Prints the header of the output table + PrintTableHeader(args.silent, options); + + // Initializes OpenCL and the libraries + auto platform = Platform(args.platform_id); + auto device = Device(platform, kDeviceType, args.device_id); + auto context = Context(device); + auto queue = CommandQueue(context, device); + if (args.compare_clblas) { clblasSetup(); } + + // Iterates over all "num_step" values jumping by "step" each time + auto s = size_t{0}; + while(true) { + + // Computes the second dimension of the matrix taking the rotation into account + auto a_two = (args.layout == Layout::kRowMajor) ? 
args.m : args.n; + + // Computes the vector sizes in case the matrix is transposed + auto a_transposed = (args.a_transpose == Transpose::kYes); + auto m_real = (a_transposed) ? args.n : args.m; + auto n_real = (a_transposed) ? args.m : args.n; + + // Computes the data sizes + auto a_size = a_two * args.a_ld + args.a_offset; + auto x_size = n_real*args.x_inc + args.x_offset; + auto y_size = m_real*args.y_inc + args.y_offset; + + // Populates input host vectors with random data + std::vector a_source(a_size); + std::vector x_source(x_size); + std::vector y_source(y_size); + PopulateVector(a_source); + PopulateVector(x_source); + PopulateVector(y_source); + + // Creates the vectors on the device + auto a_buffer = Buffer(context, CL_MEM_READ_WRITE, a_size*sizeof(T)); + auto x_buffer = Buffer(context, CL_MEM_READ_WRITE, x_size*sizeof(T)); + auto y_buffer = Buffer(context, CL_MEM_READ_WRITE, y_size*sizeof(T)); + a_buffer.WriteBuffer(queue, a_size*sizeof(T), a_source); + x_buffer.WriteBuffer(queue, x_size*sizeof(T), x_source); + y_buffer.WriteBuffer(queue, y_size*sizeof(T), y_source); + + // Runs the routine-specific code + client_routine(args, a_buffer, x_buffer, y_buffer, queue); + + // Makes the jump to the next step + ++s; + if (s >= args.num_steps) { break; } + args.m += args.step; + args.n += args.step; + args.a_ld += args.step; + } + + // Cleans-up and returns + if (args.compare_clblas) { clblasTeardown(); } +} + +// Compiles the above function +template void ClientAXY(int, char **, Routine3, const std::vector&); +template void ClientAXY(int, char **, Routine3, const std::vector&); +template void ClientAXY(int, char **, Routine3, const std::vector&); +template void ClientAXY(int, char **, Routine3, const std::vector&); + +// ================================================================================================= + // This is the matrix-matrix-matrix variant of the set-up/tear-down client routine. 
template void ClientABC(int argc, char *argv[], Routine3 client_routine, const std::vector &options) { + // Function to determine how to find the default value of the leading dimension of matrix A + auto default_ld_a = [](const Arguments args) { return args.m; }; + // Simple command line argument parser with defaults - auto args = ParseArguments(argc, argv, options); + auto args = ParseArguments(argc, argv, options, default_ld_a); if (args.print_help) { return; } // Prints the header of the output table @@ -167,7 +252,8 @@ template void ClientABC(int, char **, Routine3, const std::vec // applicable, but are searched for anyway to be able to create one common argument parser. All // arguments have a default value in case they are not found. template -Arguments ParseArguments(int argc, char *argv[], const std::vector &options) { +Arguments ParseArguments(int argc, char *argv[], const std::vector &options, + const std::function)> default_ld_a) { auto args = Arguments{}; auto help = std::string{"Options given/available:\n"}; @@ -193,7 +279,7 @@ Arguments ParseArguments(int argc, char *argv[], const std::vector void ClientXY(int argc, char *argv[], Routine2 client_routine, const std::vector &options); template +void ClientAXY(int argc, char *argv[], Routine3 client_routine, + const std::vector &options); +template void ClientABC(int argc, char *argv[], Routine3 client_routine, const std::vector &options); @@ -57,7 +60,8 @@ void ClientABC(int argc, char *argv[], Routine3 client_routine, // Parses all command-line arguments, filling in the arguments structure. If no command-line // argument is given for a particular argument, it is filled in with a default value. 
template -Arguments ParseArguments(int argc, char *argv[], const std::vector &options); +Arguments ParseArguments(int argc, char *argv[], const std::vector &options, + const std::function)> default_ld_a); // Retrieves only the precision command-line argument, since the above function is templated based // on the precision diff --git a/test/performance/routines/xgemv.cc b/test/performance/routines/xgemv.cc new file mode 100644 index 00000000..43222396 --- /dev/null +++ b/test/performance/routines/xgemv.cc @@ -0,0 +1,107 @@ + +// ================================================================================================= +// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This +// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max- +// width of 100 characters per line. +// +// Author(s): +// Cedric Nugteren +// +// This file implements the Xgemv command-line interface tester. +// +// ================================================================================================= + +#include +#include +#include + +#include "wrapper_clblas.h" +#include "performance/client.h" + +namespace clblast { +// ================================================================================================= + +// The client, used for performance testing. It contains the function calls to CLBlast and to other +// libraries to compare against. 
+template +void PerformanceXgemv(const Arguments &args, + const Buffer &a_mat, const Buffer &x_vec, const Buffer &y_vec, + CommandQueue &queue) { + + // Creates the CLBlast lambda + auto clblast_lambda = [&args, &a_mat, &x_vec, &y_vec, &queue]() { + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = Gemv(args.layout, args.a_transpose, args.m, args.n, args.alpha, + a_mat(), args.a_offset, args.a_ld, + x_vec(), args.x_offset, args.x_inc, args.beta, + y_vec(), args.y_offset, args.y_inc, + &queue_plain, &event); + clWaitForEvents(1, &event); + if (status != StatusCode::kSuccess) { + throw std::runtime_error("CLBlast error: "+ToString(static_cast(status))); + } + }; + + // Creates the clBLAS lambda (for comparison) + auto clblas_lambda = [&args, &a_mat, &x_vec, &y_vec, &queue]() { + auto queue_plain = queue(); + auto event = cl_event{}; + auto status = clblasXgemv(static_cast(args.layout), + static_cast(args.a_transpose), + args.m, args.n, args.alpha, + a_mat(), args.a_offset, args.a_ld, + x_vec(), args.x_offset, args.x_inc, args.beta, + y_vec(), args.y_offset, args.y_inc, + 1, &queue_plain, 0, nullptr, &event); + clWaitForEvents(1, &event); + if (status != CL_SUCCESS) { + throw std::runtime_error("clBLAS error: "+ToString(static_cast(status))); + } + }; + + // Runs the routines and collect the timings + auto ms_clblast = TimedExecution(args.num_runs, clblast_lambda); + auto ms_clblas = TimedExecution(args.num_runs, clblas_lambda); + + // Prints the performance of both libraries + const auto flops = 2 * args.m * args.n; + const auto bytes = (args.m*args.n + 2*args.m + args.n) * sizeof(T); + const auto output_ints = std::vector{args.m, args.n, + static_cast(args.layout), + static_cast(args.a_transpose), + args.a_ld, args.x_inc, args.y_inc, + args.a_offset, args.x_offset, args.y_offset}; + const auto output_strings = std::vector{ToString(args.alpha), + ToString(args.beta)}; + PrintTableRow(output_ints, output_strings, args.no_abbrv, + ms_clblast, 
ms_clblas, flops, bytes); +} + +// ================================================================================================= + +// Main function which calls the common client code with the routine-specific function as argument. +void ClientXgemv(int argc, char *argv[]) { + const auto o = std::vector{kArgM, kArgN, kArgLayout, kArgATransp, + kArgALeadDim, kArgXInc, kArgYInc, + kArgAOffset, kArgXOffset, kArgYOffset, + kArgAlpha, kArgBeta}; + switch(GetPrecision(argc, argv)) { + case Precision::kHalf: throw std::runtime_error("Unsupported precision mode"); + case Precision::kSingle: ClientAXY(argc, argv, PerformanceXgemv, o); break; + case Precision::kDouble: ClientAXY(argc, argv, PerformanceXgemv, o); break; + case Precision::kComplexSingle: ClientAXY(argc, argv, PerformanceXgemv, o); break; + case Precision::kComplexDouble: ClientAXY(argc, argv, PerformanceXgemv, o); break; + } +} + +// ================================================================================================= +} // namespace clblast + +// Main function (not within the clblast namespace) +int main(int argc, char *argv[]) { + clblast::ClientXgemv(argc, argv); + return 0; +} + +// ================================================================================================= diff --git a/test/wrapper_clblas.h b/test/wrapper_clblas.h index 7c71fcaa..093a8742 100644 --- a/test/wrapper_clblas.h +++ b/test/wrapper_clblas.h @@ -74,6 +74,64 @@ clblasStatus clblasXaxpy( // ================================================================================================= // BLAS level-2 (matrix-vector) routines +// Calls {clblasSgemv, clblasDgemv, clblasCgemv, clblasZgemv} with the arguments forwarded. 
+clblasStatus clblasXgemv( + clblasOrder layout, clblasTranspose tran_a, size_t m, size_t n, float alpha, + const cl_mem a_mat, size_t a_offset, size_t a_ld, + const cl_mem x_vec, size_t x_offset, size_t x_inc, float beta, + const cl_mem y_vec, size_t y_offset, size_t y_inc, + cl_uint num_queues, cl_command_queue *queues, + cl_uint num_wait_events, const cl_event *wait_events, cl_event *events) { + return clblasSgemv(layout, tran_a, m, n, alpha, + a_mat, a_offset, a_ld, + x_vec, x_offset, static_cast(x_inc), beta, + y_vec, y_offset, static_cast(y_inc), + num_queues, queues, num_wait_events, wait_events, events); +} +clblasStatus clblasXgemv( + clblasOrder layout, clblasTranspose tran_a, size_t m, size_t n, double alpha, + const cl_mem a_mat, size_t a_offset, size_t a_ld, + const cl_mem x_vec, size_t x_offset, size_t x_inc, double beta, + const cl_mem y_vec, size_t y_offset, size_t y_inc, + cl_uint num_queues, cl_command_queue *queues, + cl_uint num_wait_events, const cl_event *wait_events, cl_event *events) { + return clblasDgemv(layout, tran_a, m, n, alpha, + a_mat, a_offset, a_ld, + x_vec, x_offset, static_cast(x_inc), beta, + y_vec, y_offset, static_cast(y_inc), + num_queues, queues, num_wait_events, wait_events, events); +} +clblasStatus clblasXgemv( + clblasOrder layout, clblasTranspose tran_a, size_t m, size_t n, float2 alpha, + const cl_mem a_mat, size_t a_offset, size_t a_ld, + const cl_mem x_vec, size_t x_offset, size_t x_inc, float2 beta, + const cl_mem y_vec, size_t y_offset, size_t y_inc, + cl_uint num_queues, cl_command_queue *queues, + cl_uint num_wait_events, const cl_event *wait_events, cl_event *events) { + auto cl_alpha = cl_float2{{alpha.real(), alpha.imag()}}; + auto cl_beta = cl_float2{{beta.real(), beta.imag()}}; + return clblasCgemv(layout, tran_a, m, n, cl_alpha, + a_mat, a_offset, a_ld, + x_vec, x_offset, static_cast(x_inc), cl_beta, + y_vec, y_offset, static_cast(y_inc), + num_queues, queues, num_wait_events, wait_events, events); +} 
+clblasStatus clblasXgemv( + clblasOrder layout, clblasTranspose tran_a, size_t m, size_t n, double2 alpha, + const cl_mem a_mat, size_t a_offset, size_t a_ld, + const cl_mem x_vec, size_t x_offset, size_t x_inc, double2 beta, + const cl_mem y_vec, size_t y_offset, size_t y_inc, + cl_uint num_queues, cl_command_queue *queues, + cl_uint num_wait_events, const cl_event *wait_events, cl_event *events) { + auto cl_alpha = cl_double2{{alpha.real(), alpha.imag()}}; + auto cl_beta = cl_double2{{beta.real(), beta.imag()}}; + return clblasZgemv(layout, tran_a, m, n, cl_alpha, + a_mat, a_offset, a_ld, + x_vec, x_offset, static_cast(x_inc), cl_beta, + y_vec, y_offset, static_cast(y_inc), + num_queues, queues, num_wait_events, wait_events, events); +} + // ================================================================================================= // BLAS level-3 (matrix-matrix) routines -- cgit v1.2.3 From 9b66883e9c016ed749e4e492416ac42b63a4ddd2 Mon Sep 17 00:00:00 2001 From: CNugteren Date: Sat, 13 Jun 2015 14:10:07 +0200 Subject: Improved GEMV kernel with local memory and a tunable WPT --- include/internal/database/xgemv.h | 2 +- src/kernels/xgemv.opencl | 92 +++++++++++++++++++++++++++++++++------ src/tuning/xgemv.cc | 6 +-- 3 files changed, 83 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/internal/database/xgemv.h b/include/internal/database/xgemv.h index 178d1122..b47df4d2 100644 --- a/include/internal/database/xgemv.h +++ b/include/internal/database/xgemv.h @@ -30,7 +30,7 @@ const Database::DatabaseEntry Database::XgemvSingle = { }, { // Intel GPUs CL_DEVICE_TYPE_GPU, "Intel", { - { "Iris", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Iris", { {"WGS",256}, {"WPT",2}, {"VW",1} } }, } }, { // Default diff --git a/src/kernels/xgemv.opencl b/src/kernels/xgemv.opencl index 1d2ab435..de7d5a80 100644 --- a/src/kernels/xgemv.opencl +++ b/src/kernels/xgemv.opencl @@ -39,24 +39,90 @@ __kernel void Xgemv(const int m, const int n, const real alpha, 
const real beta, const __global real* restrict xgm, const int x_offset, const int x_inc, __global real* ygm, const int y_offset, const int y_inc) { - // Loops over the work that needs to be done (allows for an arbitrary number of threads) + // Local memory for the vector X + __local real xlm[WGS]; + + // Initializes the accumulation register + real acc[WPT]; #pragma unroll - for (int id = get_global_id(0); id &args, tuner.SetReferenceFromString(sources, "Xgemv", {args.m}, {64}); // Sets the tunable parameters and their possible values - tuner.AddParameter(id, "WGS", {64, 128}); - tuner.AddParameter(id, "WPT", {1}); + tuner.AddParameter(id, "WGS", {64, 128, 256, 512, 1024, 1536, 2048}); + tuner.AddParameter(id, "WPT", {1, 2, 4}); tuner.AddParameter(id, "VW", {1}); // Tests for a specific precision @@ -58,7 +58,7 @@ void XgemvTune(const Arguments &args, tuner.AddArgumentScalar(0); tuner.AddArgumentInput(a_mat); tuner.AddArgumentScalar(0); - tuner.AddArgumentScalar(static_cast(args.n)); + tuner.AddArgumentScalar(static_cast(args.m)); tuner.AddArgumentInput(x_vec); tuner.AddArgumentScalar(0); tuner.AddArgumentScalar(1); -- cgit v1.2.3 From 4b3e3dcfe0a2bf97b2703b1f1fd1488c99244ff4 Mon Sep 17 00:00:00 2001 From: CNugteren Date: Sat, 13 Jun 2015 20:46:01 +0200 Subject: Added a fast GEMV kernel with vector loads, no tail, and fewer if-statements --- include/internal/database/xgemv.h | 2 +- src/kernels/xgemv.opencl | 235 ++++++++++++++++++++++++++++++-------- src/routines/xgemv.cc | 11 +- src/tuning/tuning.cc | 1 + src/tuning/xgemv.cc | 14 ++- 5 files changed, 209 insertions(+), 54 deletions(-) (limited to 'include') diff --git a/include/internal/database/xgemv.h b/include/internal/database/xgemv.h index b47df4d2..37d33487 100644 --- a/include/internal/database/xgemv.h +++ b/include/internal/database/xgemv.h @@ -30,7 +30,7 @@ const Database::DatabaseEntry Database::XgemvSingle = { }, { // Intel GPUs CL_DEVICE_TYPE_GPU, "Intel", { - { "Iris", { {"WGS",256}, {"WPT",2}, 
{"VW",1} } }, + { "Iris", { {"WGS",128}, {"WPT",4}, {"VW",4} } }, } }, { // Default diff --git a/src/kernels/xgemv.opencl b/src/kernels/xgemv.opencl index 46a5e784..26e7587f 100644 --- a/src/kernels/xgemv.opencl +++ b/src/kernels/xgemv.opencl @@ -26,54 +26,12 @@ R"( #define WPT 1 // The amount of work-per-thread #endif #ifndef VW - #define VW 1 // Vector width of vectors X and Y + #define VW 1 // Vector width of matrix A loads (only for the fast kernel) #endif // ================================================================================================= -// The multiply-add function for the main part (divisable by WGS) -inline void MatrixVectorMain(const __global real* restrict agm, __local real* xlm, real acc[WPT], - const int gid, const int w, const int kwg, - const int a_ld, const int a_offset, const int a_transposed) { - if (a_transposed == 0) { // Not transposed - #pragma unroll - for (int kl=0; kl 'm' and 'n' are multiples of WGS +// --> 'a_offset' is 0 +// --> 'a_ld' is a multiple of VW +__attribute__((reqd_work_group_size(WGS, 1, 1))) +__kernel void XgemvFast(const int m, const int n, const real alpha, const real beta, + const int a_transposed, + const __global realV* restrict agm, const int a_offset, const int a_ld, + const __global real* restrict xgm, const int x_offset, const int x_inc, + __global real* ygm, const int y_offset, const int y_inc) { + // Local memory for the vector X + __local real xlm[WGS]; + + // Initializes the accumulation register + real acc[WPT]; + #pragma unroll + for (int w=0; w::DoGemv(const Layout layout, const Transpose a_transpose, status = TestVectorY(m_real, y_buffer, y_offset, y_inc, sizeof(T)); if (ErrorIn(status)) { return status; } + // Determines whether or not the fast-version can be used + bool use_fast_kernel = (a_offset == 0) && + IsMultiple(m, db_["WGS"]*db_["WPT"]) && + IsMultiple(n, db_["WGS"]) && + IsMultiple(a_ld, db_["VW"]); + + // If possible, run the fast-version of the kernel + auto kernel_name = 
(use_fast_kernel) ? "XgemvFast" : "Xgemv"; + // Retrieves the Xgemv kernel from the compiled binary try { auto program = GetProgramFromCache(); - auto kernel = Kernel(program, "Xgemv"); + auto kernel = Kernel(program, kernel_name); // Sets the kernel arguments kernel.SetArgument(0, static_cast(m_real)); diff --git a/src/tuning/tuning.cc b/src/tuning/tuning.cc index 94333089..d617af88 100644 --- a/src/tuning/tuning.cc +++ b/src/tuning/tuning.cc @@ -87,6 +87,7 @@ void TunerAXY(int argc, char* argv[], const Tuner3 &tune_function) { args.n = GetArgument(argc, argv, help, kArgN, size_t{1024}); args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar()); args.beta = GetArgument(argc, argv, help, kArgBeta, GetScalar()); + args.layout = GetArgument(argc, argv, help, kArgLayout, Layout::kColMajor); fprintf(stdout, "%s\n", help.c_str()); // Creates input buffers with random data diff --git a/src/tuning/xgemv.cc b/src/tuning/xgemv.cc index 6037a5a0..e2d54729 100644 --- a/src/tuning/xgemv.cc +++ b/src/tuning/xgemv.cc @@ -33,29 +33,33 @@ void XgemvTune(const Arguments &args, std::string kernel_source = #include "../src/kernels/xgemv.opencl" auto sources = common_source + kernel_source; - auto id = tuner.AddKernelFromString(sources, "Xgemv", {args.m}, {1}); + auto id = tuner.AddKernelFromString(sources, "XgemvFast", {args.m}, {1}); tuner.SetReferenceFromString(sources, "Xgemv", {args.m}, {64}); // Sets the tunable parameters and their possible values tuner.AddParameter(id, "WGS", {64, 128, 256, 512, 1024, 1536, 2048}); - tuner.AddParameter(id, "WPT", {1, 2, 4}); - tuner.AddParameter(id, "VW", {1}); + tuner.AddParameter(id, "WPT", {1, 2, 4, 8}); + tuner.AddParameter(id, "VW", {1, 2, 4, 8}); // Tests for a specific precision tuner.AddParameter(id, "PRECISION", {static_cast(args.precision)}); tuner.AddParameterReference("PRECISION", static_cast(args.precision)); + // Sets the constraints + auto MultipleOfX = [] (std::vector v) { return IsMultiple(v[0], v[1]); }; + 
tuner.AddConstraint(id, MultipleOfX, {"WGS", "VW"}); + tuner.AddConstraint(id, MultipleOfX, {"WPT", "VW"}); + // Modifies the thread-sizes (local) based on the parameters tuner.MulLocalSize(id, {"WGS"}); tuner.DivGlobalSize(id, {"WPT"}); - tuner.DivGlobalSize(id, {"VW"}); // Sets the function's arguments tuner.AddArgumentScalar(static_cast(args.m)); tuner.AddArgumentScalar(static_cast(args.n)); tuner.AddArgumentScalar(args.alpha); tuner.AddArgumentScalar(args.beta); - tuner.AddArgumentScalar(0); + tuner.AddArgumentScalar(static_cast(args.layout)); tuner.AddArgumentInput(a_mat); tuner.AddArgumentScalar(0); tuner.AddArgumentScalar(static_cast(args.m)); -- cgit v1.2.3 From 294a3e3d410c87ffcc7fc550e09b6d45c71a0af8 Mon Sep 17 00:00:00 2001 From: CNugteren Date: Sun, 14 Jun 2015 11:15:53 +0200 Subject: Split the three variations of the GEMV kernel for maximal tuning freedom --- include/internal/database/xgemv.h | 44 ++--- include/internal/tuning.h | 8 +- src/kernels/xgemv.opencl | 342 +++++++++++++++++++++++--------------- src/routines/xgemv.cc | 36 ++-- src/tuning/tuning.cc | 54 +++--- src/tuning/xgemv.cc | 62 ++++--- 6 files changed, 335 insertions(+), 211 deletions(-) (limited to 'include') diff --git a/include/internal/database/xgemv.h b/include/internal/database/xgemv.h index 37d33487..48ff42c8 100644 --- a/include/internal/database/xgemv.h +++ b/include/internal/database/xgemv.h @@ -18,24 +18,24 @@ const Database::DatabaseEntry Database::XgemvSingle = { "Xgemv", Precision::kSingle, { { // NVIDIA GPUs CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { - { "GeForce GTX 480", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, - { "Tesla K20m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, - { "Tesla K40m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "GeForce GTX 480", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Tesla K20m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { 
"Tesla K40m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // AMD GPUs CL_DEVICE_TYPE_GPU, "AMD", { - { "Tahiti", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tahiti", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // Intel GPUs CL_DEVICE_TYPE_GPU, "Intel", { - { "Iris", { {"WGS",128}, {"WPT",4}, {"VW",4} } }, + { "Iris", { {"WGS1",256}, {"WPT1",2}, {"WGS2",64}, {"WPT2",4}, {"VW2",4}, {"WGS3",256}, {"WPT3",2}, {"VW3",8} } }, } }, { // Default CL_DEVICE_TYPE_ALL, kDefault, { - { kDefault, { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { kDefault, { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, } @@ -47,14 +47,14 @@ const Database::DatabaseEntry Database::XgemvDouble = { "Xgemv", Precision::kDouble, { { // NVIDIA GPUs CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { - { "GeForce GTX 480", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, - { "Tesla K20m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, - { "Tesla K40m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "GeForce GTX 480", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Tesla K20m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Tesla K40m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // AMD GPUs CL_DEVICE_TYPE_GPU, "AMD", { - { "Tahiti", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tahiti", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // Intel GPUs @@ -63,7 +63,7 @@ const Database::DatabaseEntry Database::XgemvDouble = { }, { // Default CL_DEVICE_TYPE_ALL, kDefault, { - { kDefault, { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { kDefault, { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, 
{"WPT3",1}, {"VW3",1} } }, } }, } @@ -74,24 +74,24 @@ const Database::DatabaseEntry Database::XgemvComplexSingle = { "Xgemv", Precision::kComplexSingle, { { // NVIDIA GPUs CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { - { "GeForce GTX 480", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, - { "Tesla K20m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, - { "Tesla K40m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "GeForce GTX 480", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Tesla K20m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Tesla K40m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // AMD GPUs CL_DEVICE_TYPE_GPU, "AMD", { - { "Tahiti", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tahiti", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // Intel GPUs CL_DEVICE_TYPE_GPU, "Intel", { - { "Iris", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Iris", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // Default CL_DEVICE_TYPE_ALL, kDefault, { - { kDefault, { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { kDefault, { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, } @@ -103,14 +103,14 @@ const Database::DatabaseEntry Database::XgemvComplexDouble = { "Xgemv", Precision::kComplexDouble, { { // NVIDIA GPUs CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { - { "GeForce GTX 480", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, - { "Tesla K20m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, - { "Tesla K40m", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "GeForce GTX 480", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Tesla K20m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, 
{"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Tesla K40m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // AMD GPUs CL_DEVICE_TYPE_GPU, "AMD", { - { "Tahiti", { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { "Tahiti", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // Intel GPUs @@ -119,7 +119,7 @@ const Database::DatabaseEntry Database::XgemvComplexDouble = { }, { // Default CL_DEVICE_TYPE_ALL, kDefault, { - { kDefault, { {"WGS",64}, {"WPT",1}, {"VW",1} } }, + { kDefault, { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, } diff --git a/include/internal/tuning.h b/include/internal/tuning.h index 6ddf4b3a..d0cf6b5d 100644 --- a/include/internal/tuning.h +++ b/include/internal/tuning.h @@ -34,13 +34,19 @@ using Tuner3 = std::function&, const std::vector&, const std::vector&, std::vector&, cltune::Tuner&)>; +// As above, but now with an additional ID for the variation +template +using Tuner3V = std::function&, const size_t, + const std::vector&, const std::vector&, std::vector&, + cltune::Tuner&)>; + // Tuner for vector-vector input template void TunerXY(int argc, char* argv[], const Tuner2 &tune_function); // Tuner for matrix-vector-vector input template -void TunerAXY(int argc, char* argv[], const Tuner3 &tune_function); +void TunerAXY(int argc, char* argv[], const size_t num_variations, const Tuner3V &tune_function); // Tuner for matrix-matrix input template diff --git a/src/kernels/xgemv.opencl b/src/kernels/xgemv.opencl index 26e7587f..b1b2fc69 100644 --- a/src/kernels/xgemv.opencl +++ b/src/kernels/xgemv.opencl @@ -19,42 +19,63 @@ R"( // Parameters set by the tuner or by the database. Here they are given a basic default value in case // this kernel file is used outside of the CLBlast library. 
-#ifndef WGS - #define WGS 64 // The local work-group size + +// 1: For the full version of the kernel +#ifndef WGS1 + #define WGS1 64 // The local work-group size +#endif +#ifndef WPT1 + #define WPT1 1 // The amount of work-per-thread +#endif + +// 2: For the fast version +#ifndef WGS2 + #define WGS2 64 // The local work-group size +#endif +#ifndef WPT2 + #define WPT2 1 // The amount of work-per-thread +#endif +#ifndef VW2 + #define VW2 1 // Vector width of matrix A loads #endif -#ifndef WPT - #define WPT 1 // The amount of work-per-thread + +// 3: For the fast rotated version +#ifndef WGS3 + #define WGS3 64 // The local work-group size +#endif +#ifndef WPT3 + #define WPT3 1 // The amount of work-per-thread #endif -#ifndef VW - #define VW 1 // Vector width of matrix A loads (only for the fast kernel) +#ifndef VW3 + #define VW3 1 // Vector width of matrix A loads #endif // ================================================================================================= // Full version of the kernel -__attribute__((reqd_work_group_size(WGS, 1, 1))) +__attribute__((reqd_work_group_size(WGS1, 1, 1))) __kernel void Xgemv(const int m, const int n, const real alpha, const real beta, - const int a_transposed, + const int a_rotated, const __global real* restrict agm, const int a_offset, const int a_ld, const __global real* restrict xgm, const int x_offset, const int x_inc, __global real* ygm, const int y_offset, const int y_inc) { // Local memory for the vector X - __local real xlm[WGS]; + __local real xlm[WGS1]; // Initializes the accumulation register - real acc[WPT]; + real acc[WPT1]; #pragma unroll - for (int w=0; w 'm' and 'n' are multiples of WGS +// --> 'm' and 'n' are multiples of WGS2 // --> 'a_offset' is 0 -// --> 'a_ld' is a multiple of VW -__attribute__((reqd_work_group_size(WGS, 1, 1))) +// --> 'a_ld' is a multiple of VW2 +// --> 'a_rotated' is 0 +__attribute__((reqd_work_group_size(WGS2, 1, 1))) __kernel void XgemvFast(const int m, const int n, const real 
alpha, const real beta, - const int a_transposed, - const __global realV* restrict agm, const int a_offset, const int a_ld, + const int a_rotated, + const __global realVF* restrict agm, const int a_offset, const int a_ld, const __global real* restrict xgm, const int x_offset, const int x_inc, __global real* ygm, const int y_offset, const int y_inc) { // Local memory for the vector X - __local real xlm[WGS]; + __local real xlm[WGS2]; // Initializes the accumulation register - real acc[WPT]; + real acc[WPT2]; #pragma unroll - for (int w=0; w 'm' and 'n' are multiples of WGS3 +// --> 'a_offset' is 0 +// --> 'a_ld' is a multiple of VW3 +// --> 'a_rotated' is 1 +__attribute__((reqd_work_group_size(WGS3, 1, 1))) +__kernel void XgemvFastRot(const int m, const int n, const real alpha, const real beta, + const int a_rotated, + const __global realVFR* restrict agm, const int a_offset, const int a_ld, + const __global real* restrict xgm, const int x_offset, const int x_inc, + __global real* ygm, const int y_offset, const int y_inc) { + // Local memory for the vector X + __local real xlm[WGS3]; + + // Initializes the accumulation register + real acc[WPT3]; + #pragma unroll + for (int w=0; w::DoGemv(const Layout layout, const Transpose a_transpose, if (ErrorIn(status)) { return status; } // Determines whether or not the fast-version can be used - bool use_fast_kernel = (a_offset == 0) && - IsMultiple(m, db_["WGS"]*db_["WPT"]) && - IsMultiple(n, db_["WGS"]) && - IsMultiple(a_ld, db_["VW"]); - - // If possible, run the fast-version of the kernel - auto kernel_name = (use_fast_kernel) ? 
"XgemvFast" : "Xgemv"; + bool use_fast_kernel = (a_offset == 0) && (a_rotated == 0) && + IsMultiple(m, db_["WGS2"]*db_["WPT2"]) && + IsMultiple(n, db_["WGS2"]) && + IsMultiple(a_ld, db_["VW2"]); + bool use_fast_kernel_rot = (a_offset == 0) && (a_rotated == 1) && + IsMultiple(m, db_["WGS3"]*db_["WPT3"]) && + IsMultiple(n, db_["WGS3"]) && + IsMultiple(a_ld, db_["VW3"]); + + // If possible, run the fast-version (rotated or non-rotated) of the kernel + auto kernel_name = "Xgemv"; + auto m_ceiled = Ceil(m_real, db_["WGS1"]*db_["WPT1"]); + auto global_size = m_ceiled / db_["WPT1"]; + auto local_size = db_["WGS1"]; + if (use_fast_kernel) { + kernel_name = "XgemvFast"; + global_size = m_real / db_["WPT2"]; + local_size = db_["WGS2"]; + } + if (use_fast_kernel_rot) { + kernel_name = "XgemvFastRot"; + global_size = m_real / db_["WPT3"]; + local_size = db_["WGS3"]; + } // Retrieves the Xgemv kernel from the compiled binary try { @@ -100,9 +117,8 @@ StatusCode Xgemv::DoGemv(const Layout layout, const Transpose a_transpose, kernel.SetArgument(13, static_cast(y_inc)); // Launches the kernel - auto m_ceiled = Ceil(m_real, db_["WGS"]*db_["WPT"]); - auto global = std::vector{m_ceiled / db_["WPT"]}; - auto local = std::vector{db_["WGS"]}; + auto global = std::vector{global_size}; + auto local = std::vector{local_size}; status = RunKernel(kernel, global, local); if (ErrorIn(status)) { return status; } diff --git a/src/tuning/tuning.cc b/src/tuning/tuning.cc index d617af88..2dcb11d5 100644 --- a/src/tuning/tuning.cc +++ b/src/tuning/tuning.cc @@ -75,7 +75,8 @@ template void TunerXY(int, char**, const Tuner2&); // Function to get command-line argument, set-up the input buffers, configure the tuner, and collect // the results. Used for matrix-vector-vector routines. 
template -void TunerAXY(int argc, char* argv[], const Tuner3 &tune_function) { +void TunerAXY(int argc, char* argv[], const size_t num_variations, + const Tuner3V &tune_function) { // Sets the parameters and platform/device for which to tune (command-line options) auto help = std::string{"* Options given/available:\n"}; @@ -83,11 +84,10 @@ void TunerAXY(int argc, char* argv[], const Tuner3 &tune_function) { args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0}); args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0}); args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle); - args.m = GetArgument(argc, argv, help, kArgM, size_t{1024}); - args.n = GetArgument(argc, argv, help, kArgN, size_t{1024}); + args.m = GetArgument(argc, argv, help, kArgM, size_t{2048}); + args.n = GetArgument(argc, argv, help, kArgN, size_t{2048}); args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar()); args.beta = GetArgument(argc, argv, help, kArgBeta, GetScalar()); - args.layout = GetArgument(argc, argv, help, kArgLayout, Layout::kColMajor); fprintf(stdout, "%s\n", help.c_str()); // Creates input buffers with random data @@ -98,36 +98,40 @@ void TunerAXY(int argc, char* argv[], const Tuner3 &tune_function) { PopulateVector(x_vec); PopulateVector(y_vec); - // Initializes the tuner for the chosen device - cltune::Tuner tuner(args.platform_id, args.device_id); + // Loop over the different variations of the kernel + for (auto variation=size_t{1}; variation<=num_variations; ++variation) { - // Use full-search to explore all parameter combinations. - tuner.UseFullSearch(); + // Initializes the tuner for the chosen device + cltune::Tuner tuner(args.platform_id, args.device_id); - // Configures the tuning parameters (kernel specific) - tune_function(args, a_mat, x_vec, y_vec, tuner); + // Use full-search to explore all parameter combinations. 
+ tuner.UseFullSearch(); - // Starts the tuning process - tuner.Tune(); + // Configures the tuning parameters (kernel specific) + tune_function(args, variation, a_mat, x_vec, y_vec, tuner); - // Prints the results to screen - auto time_ms = tuner.PrintToScreen(); - tuner.PrintFormatted(); + // Starts the tuning process + tuner.Tune(); - // Also prints the performance of the best-case in terms of GB/s and GFLOPS - const auto mega_bytes = ((args.m*args.n + 2*args.m + args.n)*GetBytes(args.precision)) * 1.0e-6; - const auto mega_flops = (2*args.m*args.n) * 1.0e-6; - if (time_ms != 0.0) { - printf("[ -------> ] %.1lf ms or %.1lf GB/s or %.1lf GFLOPS\n", - time_ms, mega_bytes/time_ms, mega_flops/time_ms); + // Prints the results to screen + auto time_ms = tuner.PrintToScreen(); + tuner.PrintFormatted(); + + // Also prints the performance of the best-case in terms of GB/s and GFLOPS + const auto mega_bytes = ((args.m*args.n + 2*args.m + args.n)*GetBytes(args.precision)) * 1.0e-6; + const auto mega_flops = (2*args.m*args.n) * 1.0e-6; + if (time_ms != 0.0) { + printf("[ -------> ] %.1lf ms or %.1lf GB/s or %.1lf GFLOPS\n", + time_ms, mega_bytes/time_ms, mega_flops/time_ms); + } } } // Compiles the above function -template void TunerAXY(int, char**, const Tuner3&); -template void TunerAXY(int, char**, const Tuner3&); -template void TunerAXY(int, char**, const Tuner3&); -template void TunerAXY(int, char**, const Tuner3&); +template void TunerAXY(int, char**, const size_t, const Tuner3V&); +template void TunerAXY(int, char**, const size_t, const Tuner3V&); +template void TunerAXY(int, char**, const size_t, const Tuner3V&); +template void TunerAXY(int, char**, const size_t, const Tuner3V&); // ================================================================================================= diff --git a/src/tuning/xgemv.cc b/src/tuning/xgemv.cc index e2d54729..dccd250c 100644 --- a/src/tuning/xgemv.cc +++ b/src/tuning/xgemv.cc @@ -8,6 +8,10 @@ // Cedric Nugteren // // This file 
implements an auto-tuner to tune the Xgemv OpenCL kernel. It uses the CLTune library. +// Three variations of the kernel are tuned: +// 1: The full version of the kernel +// 2: The fast version for non-transposed matrices +// 3: The fast version for transposed matrices // // ================================================================================================= @@ -23,43 +27,60 @@ namespace clblast { // The Xgemv auto-tuner template -void XgemvTune(const Arguments &args, +void XgemvTune(const Arguments &args, const size_t variation, const std::vector &a_mat, const std::vector &x_vec, std::vector &y_vec, cltune::Tuner &tuner) { + // Sets the kernel name and the layout argument + auto kernel_name = (variation == 1) ? "Xgemv" : ((variation == 2) ? "XgemvFast" : "XgemvFastRot"); + auto a_rotated = (variation == 3) ? 1 : 0; + // This points to the Xgemv kernel as found in the CLBlast library std::string common_source = #include "../src/kernels/common.opencl" std::string kernel_source = #include "../src/kernels/xgemv.opencl" auto sources = common_source + kernel_source; - auto id = tuner.AddKernelFromString(sources, "XgemvFast", {args.m}, {1}); + auto id = tuner.AddKernelFromString(sources, kernel_name, {args.m}, {1}); tuner.SetReferenceFromString(sources, "Xgemv", {args.m}, {64}); - // Sets the tunable parameters and their possible values - tuner.AddParameter(id, "WGS", {64, 128, 256, 512, 1024, 1536, 2048}); - tuner.AddParameter(id, "WPT", {1, 2, 4, 8}); - tuner.AddParameter(id, "VW", {1, 2, 4, 8}); + // Helper for the constraints + auto MultipleOfX = [] (std::vector v) { return IsMultiple(v[0], v[1]); }; + + // Sets the tunable parameters, their possible values, the adjusted thread sizes, and constraints + if (variation == 1) { + tuner.AddParameter(id, "WGS1", {64, 128, 256, 512, 1024, 1536, 2048}); + tuner.AddParameter(id, "WPT1", {1, 2, 4, 8}); + tuner.MulLocalSize(id, {"WGS1"}); + tuner.DivGlobalSize(id, {"WPT1"}); + } + else if (variation == 2) { + 
tuner.AddParameter(id, "WGS2", {64, 128, 256, 512, 1024, 1536, 2048}); + tuner.AddParameter(id, "WPT2", {1, 2, 4, 8}); + tuner.AddParameter(id, "VW2", {1, 2, 4, 8}); + tuner.MulLocalSize(id, {"WGS2"}); + tuner.DivGlobalSize(id, {"WPT2"}); + tuner.AddConstraint(id, MultipleOfX, {"WPT2", "VW2"}); + } + else if (variation == 3) { + tuner.AddParameter(id, "WGS3", {64, 128, 256, 512, 1024, 1536, 2048}); + tuner.AddParameter(id, "WPT3", {1, 2, 4, 8}); + tuner.AddParameter(id, "VW3", {1, 2, 4, 8}); + tuner.MulLocalSize(id, {"WGS3"}); + tuner.DivGlobalSize(id, {"WPT3"}); + tuner.AddConstraint(id, MultipleOfX, {"WGS3", "VW3"}); + } // Tests for a specific precision tuner.AddParameter(id, "PRECISION", {static_cast(args.precision)}); tuner.AddParameterReference("PRECISION", static_cast(args.precision)); - // Sets the constraints - auto MultipleOfX = [] (std::vector v) { return IsMultiple(v[0], v[1]); }; - tuner.AddConstraint(id, MultipleOfX, {"WGS", "VW"}); - tuner.AddConstraint(id, MultipleOfX, {"WPT", "VW"}); - - // Modifies the thread-sizes (local) based on the parameters - tuner.MulLocalSize(id, {"WGS"}); - tuner.DivGlobalSize(id, {"WPT"}); - // Sets the function's arguments tuner.AddArgumentScalar(static_cast(args.m)); tuner.AddArgumentScalar(static_cast(args.n)); tuner.AddArgumentScalar(args.alpha); tuner.AddArgumentScalar(args.beta); - tuner.AddArgumentScalar(static_cast(args.layout)); + tuner.AddArgumentScalar(static_cast(a_rotated)); tuner.AddArgumentInput(a_mat); tuner.AddArgumentScalar(0); tuner.AddArgumentScalar(static_cast(args.m)); @@ -75,12 +96,13 @@ void XgemvTune(const Arguments &args, // Main function which calls the common client code with the routine-specific function as argument. 
void TunerXgemv(int argc, char *argv[]) { + auto num_variations = size_t{3}; switch(GetPrecision(argc, argv)) { case Precision::kHalf: throw std::runtime_error("Unsupported precision mode"); - case Precision::kSingle: TunerAXY(argc, argv, XgemvTune); break; - case Precision::kDouble: TunerAXY(argc, argv, XgemvTune); break; - case Precision::kComplexSingle: TunerAXY(argc, argv, XgemvTune); break; - case Precision::kComplexDouble: TunerAXY(argc, argv, XgemvTune); break; + case Precision::kSingle: TunerAXY(argc, argv, num_variations, XgemvTune); break; + case Precision::kDouble: TunerAXY(argc, argv, num_variations, XgemvTune); break; + case Precision::kComplexSingle: TunerAXY(argc, argv, num_variations, XgemvTune); break; + case Precision::kComplexDouble: TunerAXY(argc, argv, num_variations, XgemvTune); break; } } -- cgit v1.2.3 From ce703a2f5ab18402ce72899cc05e440c65c3ee85 Mon Sep 17 00:00:00 2001 From: CNugteren Date: Mon, 15 Jun 2015 08:41:13 +0200 Subject: Added tuning for DGEMV on Iris and SGEMV on K40m --- include/internal/database/xgemv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/internal/database/xgemv.h b/include/internal/database/xgemv.h index 48ff42c8..ef45f486 100644 --- a/include/internal/database/xgemv.h +++ b/include/internal/database/xgemv.h @@ -20,7 +20,7 @@ const Database::DatabaseEntry Database::XgemvSingle = { CL_DEVICE_TYPE_GPU, "NVIDIA Corporation", { { "GeForce GTX 480", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, { "Tesla K20m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, - { "Tesla K40m", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Tesla K40m", { {"WGS1",256}, {"WPT1",1}, {"WGS2",256}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",4} } }, } }, { // AMD GPUs @@ -86,7 +86,7 @@ const 
Database::DatabaseEntry Database::XgemvComplexSingle = { }, { // Intel GPUs CL_DEVICE_TYPE_GPU, "Intel", { - { "Iris", { {"WGS1",64}, {"WPT1",1}, {"WGS2",64}, {"WPT2",1}, {"VW2",1}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, + { "Iris", { {"WGS1",256}, {"WPT1",1}, {"WGS2",64}, {"WPT2",4}, {"VW2",2}, {"WGS3",64}, {"WPT3",1}, {"VW3",1} } }, } }, { // Default -- cgit v1.2.3