author     kodonell <kodonell@users.noreply.github.com>    2018-03-27 08:55:39 +1300
committer  kodonell <kodonell@users.noreply.github.com>    2018-03-27 08:55:39 +1300
commit     173a7eb928a1bba0dffe3587cf34336065c9f4d0 (patch)
tree       221e63539dd30181ff7405284c0e9f68b97867aa
parent     d16f2d131706e1a6ed5202194fac857e3dda014d (diff)
parent     a97d8a01970c49f2b21d952e841668da3db0184d (diff)
merged
-rw-r--r--  CHANGELOG | 2
-rw-r--r--  CMakeLists.txt | 10
-rw-r--r--  README.md | 3
-rw-r--r--  ROADMAP.md | 2
-rw-r--r--  doc/api.md | 74
-rw-r--r--  doc/glossary.md | 14
-rw-r--r--  doc/tuning.md | 24
-rw-r--r--  include/clblast.h | 57
-rw-r--r--  samples/tuning_api.cpp | 77
-rw-r--r--  scripts/benchmark/utils.py | 5
-rwxr-xr-x  scripts/generator/generator.py | 4
-rw-r--r--  src/database/apple_cpu_fallback.hpp | 2
-rw-r--r--  src/kernels/level2/xtrsv.opencl | 2
-rw-r--r--  src/kernels/level3/level3.opencl | 2
-rw-r--r--  src/pyclblast/README.md | 8
-rw-r--r--  src/pyclblast/samples/saxpy.py | 1
-rw-r--r--  src/pyclblast/samples/sgemm.py | 1
-rw-r--r--  src/pyclblast/samples/sgemv.py | 1
-rw-r--r--  src/pyclblast/test/__init__.py | 0
-rw-r--r--  src/pyclblast/test/test_pyclblast.py | 81
-rw-r--r--  src/routines/common.cpp | 8
-rw-r--r--  src/tuning/configurations.cpp | 34
-rw-r--r--  src/tuning/configurations.hpp | 21
-rw-r--r--  src/tuning/kernels/copy_fast.cpp | 91
-rw-r--r--  src/tuning/kernels/copy_fast.hpp | 97
-rw-r--r--  src/tuning/kernels/copy_pad.cpp | 99
-rw-r--r--  src/tuning/kernels/copy_pad.hpp | 105
-rw-r--r--  src/tuning/kernels/invert.cpp | 104
-rw-r--r--  src/tuning/kernels/invert.hpp | 115
-rw-r--r--  src/tuning/kernels/transpose_fast.cpp | 91
-rw-r--r--  src/tuning/kernels/transpose_fast.hpp | 102
-rw-r--r--  src/tuning/kernels/transpose_pad.cpp | 98
-rw-r--r--  src/tuning/kernels/transpose_pad.hpp | 109
-rw-r--r--  src/tuning/kernels/xaxpy.cpp | 93
-rw-r--r--  src/tuning/kernels/xaxpy.hpp | 99
-rw-r--r--  src/tuning/kernels/xdot.cpp | 99
-rw-r--r--  src/tuning/kernels/xdot.hpp | 110
-rw-r--r--  src/tuning/kernels/xgemm.cpp | 165
-rw-r--r--  src/tuning/kernels/xgemm.hpp | 174
-rw-r--r--  src/tuning/kernels/xgemm_direct.cpp | 162
-rw-r--r--  src/tuning/kernels/xgemm_direct.hpp | 171
-rw-r--r--  src/tuning/kernels/xgemv.cpp | 138
-rw-r--r--  src/tuning/kernels/xgemv.hpp | 155
-rw-r--r--  src/tuning/kernels/xger.cpp | 100
-rw-r--r--  src/tuning/kernels/xger.hpp | 106
-rw-r--r--  src/tuning/tuning.cpp | 14
-rw-r--r--  src/tuning/tuning.hpp | 14
-rw-r--r--  src/tuning/tuning_api.cpp | 387
-rw-r--r--  src/utilities/timing.cpp | 7
-rw-r--r--  src/utilities/timing.hpp | 3
-rw-r--r--  test/correctness/misc/preprocessor.cpp | 14
51 files changed, 2244 insertions, 1211 deletions
diff --git a/CHANGELOG b/CHANGELOG
index c23bf4c0..5815e343 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -2,6 +2,8 @@
Development (next version)
- Added Python interface to CLBlast 'PyCLBlast'
- Added CLBlast to Ubuntu PPA and macOS Homebrew package managers
+- Added an API to run the tuners programmatically without any I/O
+- Re-added a local memory size constraint to the tuners
- Updated and reorganised the CLBlast documentation
- Various minor fixes and enhancements
- Added non-BLAS level-1 routines:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2e685d76..eb04287e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -210,7 +210,7 @@ set(PRECISIONS 32 64 3232 6464 16)
# Sample programs
if(OPENCL)
- set(SAMPLE_PROGRAMS_CPP sgemm sgemm_batched dtrsm)
+ set(SAMPLE_PROGRAMS_CPP sgemm sgemm_batched dtrsm tuning_api)
set(SAMPLE_PROGRAMS_C sasum dgemv sgemm haxpy cache)
if(NETLIB)
set(SAMPLE_PROGRAMS_C ${SAMPLE_PROGRAMS_C} sgemm_netlib)
@@ -235,6 +235,8 @@ set(SOURCES
src/kernel_preprocessor.cpp
src/routine.cpp
src/routines/levelx/xinvert.cpp # only source, don't include it as a test
+ src/tuning/configurations.cpp
+ src/tuning/tuning_api.cpp
)
set(HEADERS # such that they can be discovered by IDEs such as CLion and Visual Studio
include/clblast_half.h
@@ -258,6 +260,9 @@ set(HEADERS # such that they can be discovered by IDEs such as CLion and Visual
src/kernel_preprocessor.hpp
src/cxpp11_common.hpp
src/routine.hpp
+ src/tuning/configurations.hpp
+ src/tuning/tuning.hpp
+ src/tuning/routines/routine_tuner.hpp
)
if(OPENCL)
set(SOURCES ${SOURCES} src/clblast.cpp src/clblast_c.cpp)
@@ -295,6 +300,9 @@ foreach(DATABASE ${DATABASES})
set(HEADERS ${HEADERS} src/database/kernels/${DATABASE}/${DATABASE}_3232.hpp)
set(HEADERS ${HEADERS} src/database/kernels/${DATABASE}/${DATABASE}_6464.hpp)
endforeach()
+foreach(KERNEL ${KERNELS})
+ set(HEADERS ${HEADERS} src/tuning/kernels/${KERNEL}.hpp)
+endforeach()
# Creates and links the library
if(BUILD_SHARED_LIBS)
diff --git a/README.md b/README.md
index 047c7928..2084e51e 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ CLBlast: The tuned OpenCL BLAS library
|-----|-----|-----|-----|-----|
| Windows | [![Build Status](https://ci.appveyor.com/api/projects/status/github/cnugteren/clblast?branch=master&svg=true)](https://ci.appveyor.com/project/CNugteren/clblast) | N/A | N/A | N/A |
| Linux | [![Build Status](https://travis-ci.org/CNugteren/CLBlast.svg?branch=master)](https://travis-ci.org/CNugteren/CLBlast/branches) | [![Build Status](http://67.207.87.39:8010/badges/clblast-linux-intel-e5-2620-v4.svg)](http://67.207.87.39:8010/#/builders/97) | [![Build Status](http://67.207.87.39:8010/badges/clblast-linux-nvidia-k80.svg)](http://67.207.87.39:8010/#/builders/98) | [![Build Status](http://67.207.87.39:8010/badges/clblast-linux-amd-w9100.svg)](http://67.207.87.39:8010/#/builders/96) |
-| OS X | [![Build Status](https://travis-ci.org/CNugteren/CLBlast.svg?branch=master)](https://travis-ci.org/CNugteren/CLBlast/branches) | N/A | N/A | N/A |
+| OS X | [![Build Status](https://travis-ci.org/CNugteren/CLBlast.svg?branch=master)](https://travis-ci.org/CNugteren/CLBlast/branches) | [![Build Status](http://67.207.87.39:8010/badges/clblast-osx-intel-e5-2620-v4.svg)](http://67.207.87.39:8010/#/builders/101) | N/A | N/A |
CLBlast is a modern, lightweight, performant and tunable OpenCL BLAS library written in C++11. It is designed to leverage the full performance potential of a wide variety of OpenCL devices from different vendors, including desktop and laptop GPUs, embedded GPUs, and other accelerators. CLBlast implements BLAS routines: basic linear algebra subprograms operating on vectors and matrices. See [the CLBlast website](https://cnugteren.github.io/clblast) for performance reports on various devices as well as the latest CLBlast news.
@@ -78,6 +78,7 @@ More detailed documentation is available in separate files:
* [Tuning for better performance](doc/tuning.md)
* [Testing the library for correctness](doc/testing.md)
* [Bindings / wrappers for other languages](doc/bindings.md)
+* [Glossary with some terms explained](doc/glossary.md)
Known issues
diff --git a/ROADMAP.md b/ROADMAP.md
index bcae8bc4..28a4afa2 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -17,6 +17,6 @@ This file gives an overview of the main features planned for addition to CLBlast
| [#233](https://github.com/CNugteren/CLBlast/issues/233) | Feb '18 | CNugteren | ✔ | Add CLBlast to common package managers |
| [#223](https://github.com/CNugteren/CLBlast/issues/223) | Feb '18 | CNugteren | ✔ | Python OpenCL interface |
| [#228](https://github.com/CNugteren/CLBlast/issues/228) | Mar '18 | CNugteren | | Improving performance for Qualcomm Adreno GPUs |
-| [#237](https://github.com/CNugteren/CLBlast/issues/237) | Mar '18 | CNugteren | | Making tuning possible from the CLBlast API |
+| [#237](https://github.com/CNugteren/CLBlast/issues/237) | Mar '18 | CNugteren | ✔ | Making tuning possible from the CLBlast API |
| [#136](https://github.com/CNugteren/CLBlast/issues/136) | Apr '18 | CNugteren | | Implement xAXPBY and xSET |
| [#169](https://github.com/CNugteren/CLBlast/issues/169) | ?? | dividiti | | Problem-specific tuning parameter selection |
diff --git a/doc/api.md b/doc/api.md
index 0fbdeaa0..a60e16ce 100644
--- a/doc/api.md
+++ b/doc/api.md
@@ -3497,3 +3497,77 @@ Arguments to OverrideParameters (C++ version):
* `const std::string &kernel_name`: The target kernel name. This has to be one of the existing CLBlast kernels (Xaxpy, Xdot, Xgemv, XgemvFast, XgemvFastRot, Xgemv, Xger, Copy, Pad, Transpose, Padtranspose, Xgemm, or XgemmDirect). If this argument is incorrect, this function will return with the `clblast::kInvalidOverrideKernel` status-code.
* `const Precision precision`: The CLBlast precision enum to set the new parameters for.
* `const std::unordered_map<std::string,size_t> &parameters`: An unordered map of strings to integers. This has to contain all the tuning parameters for a specific kernel as reported by the included tuners (e.g. `{ {"COPY_DIMX",8}, {"COPY_DIMY",32}, {"COPY_VW",4}, {"COPY_WPT",8} }` for the `Copy` kernel). If this argument is incorrect, this function will return with the `clblast::kMissingOverrideParameter` status-code.
+
+
+
+Tune<kernel_name>: Run the tuner for a particular kernel (advanced usage)
+-------------
+
+The CLBlast kernels can be tuned using the tuning binaries, but also programmatically through an API. This is recommended for advanced usage only; see [the tuning docs](tuning.md) for more information.
+
+C++ API:
+```
+// Tunes the "Xaxpy" kernel, used for many level-1 routines such as XAXPY, XCOPY, and XSWAP
+template <typename T>
+StatusCode PUBLIC_API TuneXaxpy(cl_command_queue* queue, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xdot" kernel, used for level-1 reduction routines such as XDOT, XMAX, and XSUM
+template <typename T>
+StatusCode PUBLIC_API TuneXdot(cl_command_queue* queue, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xgemv" kernel, used for matrix-vector level-2 routines such as XGEMV, XGBMV, and XHEMV
+template <typename T>
+StatusCode PUBLIC_API TuneXgemv(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xger" kernel, used for matrix update level-2 routines such as XGER, XHER, and XSYR2
+template <typename T>
+StatusCode PUBLIC_API TuneXger(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xgemm" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode PUBLIC_API TuneXgemm(cl_command_queue* queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "XgemmDiret" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode PUBLIC_API TuneXgemmDirect(cl_command_queue* queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Copy" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode PUBLIC_API TuneCopy(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Pad" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode PUBLIC_API TunePad(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Transpose" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode PUBLIC_API TuneTranspose(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Padtranspose" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode PUBLIC_API TunePadtranspose(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xgemm" kernel, used for the level-3 routine XTRSM
+template <typename T>
+StatusCode PUBLIC_API TuneInvert(cl_command_queue* queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+```
+
+Arguments to Tune<kernel_name> (C++ version):
+
+* `cl_command_queue* queue`: Pointer to an OpenCL command queue associated with a context and device to tune the kernel for.
+* `const size_t m`: The routine argument `m` to tune for (not applicable for all kernels)
+* `const size_t n`: The routine argument `n` to tune for
+* `const size_t k`: The routine argument `k` to tune for (not applicable for all kernels)
+* `const double fraction`: A value between 0.0 and 1.0 which determines the fraction of the tuning search space to explore.
+* `std::unordered_map<std::string,size_t> &parameters`: An unordered map of strings to integers. This will return the best found tuning parameters.
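
For illustration, a minimal sketch of how a tuning call and `OverrideParameters` fit together, modelled on the `samples/tuning_api.cpp` sample added in this commit. The helper function name and the chosen `n`/`fraction` values are made up, and a valid OpenCL queue and device are assumed to exist:

```
// Minimal sketch: tune the Xaxpy kernel once and apply the result to this device.
// Assumes `queue` and `device` are valid OpenCL handles created elsewhere.
#include <string>
#include <unordered_map>
#include <clblast.h>

clblast::StatusCode TuneAndApplyXaxpy(cl_command_queue queue, cl_device_id device) {
  std::unordered_map<std::string, size_t> parameters;
  const size_t n = 4096;        // vector size to tune for
  const double fraction = 0.5;  // explore half of the search space

  // Runs the tuner; the best-found parameters are written into `parameters`
  const auto status = clblast::TuneXaxpy<float>(&queue, n, fraction, parameters);
  if (status != clblast::StatusCode::kSuccess) { return status; }

  // Applies the result, so that subsequent AXPY/COPY/SCAL/SWAP calls use the tuned values
  return clblast::OverrideParameters(device, "Xaxpy", clblast::Precision::kSingle, parameters);
}
```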
diff --git a/doc/glossary.md b/doc/glossary.md
new file mode 100644
index 00000000..821ffc69
--- /dev/null
+++ b/doc/glossary.md
@@ -0,0 +1,14 @@
+CLBlast: Glossary
+================
+
+This document describes some commonly used terms in CLBlast documentation and code. For other information about CLBlast, see the [main README](../README.md).
+
+* __BLAS__: The set of 'Basic Linear Algebra Subroutines'.
+* __Netlib BLAS__: The official BLAS API definition, with __CBLAS__ providing the C headers.
+* __OpenCL__: The Open Computing Language, a Khronos standard for heterogeneous and parallel computing, e.g. on GPUs.
+* __kernel__: An OpenCL parallel program that runs on the target device.
+* __clBLAS__: Another OpenCL BLAS library, maintained by AMD.
+* __cuBLAS__: The main CUDA BLAS library, maintained by NVIDIA.
+* __GEMM__: The 'GEneral Matrix Multiplication' routine.
+* __Direct GEMM__: Computing GEMM using a single generic kernel which handles all cases (e.g. all kinds of matrix sizes).
+* __Indirect GEMM__: Computing GEMM using multiple kernels: the main GEMM kernel and a few pre-processing and post-processing kernels. The main kernel makes several assumptions (e.g. sizes need to be multiples of 32), which the other kernels make sure are satisfied. The main kernel is often faster than the generic kernel of the direct approach, but the cost of pre-processing and post-processing kernels can sometimes be high for small sizes or particular devices.
diff --git a/doc/tuning.md b/doc/tuning.md
index 88c4fc4c..ebf3cb0c 100644
--- a/doc/tuning.md
+++ b/doc/tuning.md
@@ -100,6 +100,14 @@ In summary, tuning the entire library for your device can be done as follows (st
After the kernels are tuned, you can run the `clblast_tuner_routine_xgemm` tuner to optimize the high-level GEMM routine, i.e. selecting which method to use: the direct kernel or the indirect kernel.
+Tuning using the API (advanced users only)
+-------------
+
+Apart from running the tuning binaries, it is also possible to run the tuners programmatically through the CLBlast API. This could be useful if you want to tune for non-standard arguments (e.g. a rectangular or very small matrix). The tuning results can then also be set programmatically using `OverrideParameters`.
+
+The tuning API does not perform any disk or stdout I/O, so it is not possible to track progress. Running the regular tuner binaries first gives an idea of the number of configurations to explore for a particular device, and thus an indication of a good value for the `fraction` argument (see the [API documentation](api.md) for more details).
+
+
Inspecting and changing tuning parameters at run-time
-------------
@@ -120,3 +128,19 @@ Tuning OpenCL compiler options
-------------
For all of CLBlast's APIs, it is possible to set an OS environment variable `CLBLAST_BUILD_OPTIONS` to pass specific build options to the OpenCL compiler. Also make sure this is set in the same way when running the tuners.
+
+
+Which kernels are used for which routines?
+-------------
+
+To find out which tuners to run for which routines, you can use the table below. The kernel names correspond to the tuner binaries, the tuner API, and to the arguments for `OverrideParameters` and `RetrieveParameters`.
+
+| Routines | Kernel(s) / Tuner(s) |
+| -------------------------------------------------------------------------|---------------------------------|
+| AXPY COPY SCAL SWAP OMATCOPY AXPYBATCHED | Xaxpy |
+| AMAX ASUM DOT DOTC DOTU NRM2 SUM MAX MIN AMIN | Xdot |
+| GBMV GEMV HBMV HEMV HPMV SBMV SPMV SYMV TBMV TPMV TRMV TRSV | Xgemv |
+| GER GERC GERU HER HER2 HPR HPR2 SPR SPR2 SYR SYR2 | Xger |
+| GEMM HEMM HER2K HERK SYMM SYR2K SYRK TRMM GEMMBATCHED GEMMSTRIDEDBATCHED | Xgemm XgemmDirect Copy Pad Transpose Padtranspose |
+| TRSM | Xgemm XgemmDirect Copy Pad Transpose Padtranspose Invert |
+| IM2COL | Copy |
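
Read together with the tuning API described above, the table translates into one tuner call plus one `OverrideParameters` call per listed kernel. A rough sketch for a GEMM-style workload follows; the helper name and sizes are hypothetical, and `queue` and `device` are assumed to be valid OpenCL handles:

```
#include <string>
#include <unordered_map>
#include <clblast.h>

// Hypothetical helper: tunes (a subset of) the kernels behind GEMM and applies the results.
void TuneForGemm(cl_command_queue queue, cl_device_id device,
                 const size_t m, const size_t n, const size_t k, const double fraction) {
  std::unordered_map<std::string, size_t> params;

  clblast::TuneXgemm<float>(&queue, m, n, k, fraction, params);
  clblast::OverrideParameters(device, "Xgemm", clblast::Precision::kSingle, params);

  params.clear();
  clblast::TuneXgemmDirect<float>(&queue, m, n, k, fraction, params);
  clblast::OverrideParameters(device, "XgemmDirect", clblast::Precision::kSingle, params);

  params.clear();
  clblast::TuneCopy<float>(&queue, m, n, fraction, params);
  clblast::OverrideParameters(device, "Copy", clblast::Precision::kSingle, params);

  // Pad, Transpose and Padtranspose follow the same (m, n) pattern with their matching names.
}
```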
diff --git a/include/clblast.h b/include/clblast.h
index 9d3b9ea0..ce64b37a 100644
--- a/include/clblast.h
+++ b/include/clblast.h
@@ -705,6 +705,63 @@ StatusCode PUBLIC_API OverrideParameters(const cl_device_id device, const std::s
// =================================================================================================
+// Tunes the "Xaxpy" kernel, used for many level-1 routines such as XAXPY, XCOPY, and XSWAP
+template <typename T>
+StatusCode TuneXaxpy(cl_command_queue* queue, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xdot" kernel, used for level-1 reduction routines such as XDOT, XMAX, and XSUM
+template <typename T>
+StatusCode TuneXdot(cl_command_queue* queue, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xgemv" kernel, used for matrix-vector level-2 routines such as XGEMV, XGBMV, and XHEMV
+template <typename T>
+StatusCode TuneXgemv(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xger" kernel, used for matrix update level-2 routines such as XGER, XHER, and XSYR2
+template <typename T>
+StatusCode TuneXger(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xgemm" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode TuneXgemm(cl_command_queue* queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "XgemmDiret" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode TuneXgemmDirect(cl_command_queue* queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Copy" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode TuneCopy(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Pad" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode TunePad(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Transpose" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode TuneTranspose(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Padtranspose" kernel, used for most level-3 routines such as XGEMM, XSYMM, and XHER2K
+template <typename T>
+StatusCode TunePadtranspose(cl_command_queue* queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// Tunes the "Xgemm" kernel, used for the level-3 routine XTRSM
+template <typename T>
+StatusCode TuneInvert(cl_command_queue* queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters);
+
+// =================================================================================================
+
} // namespace clblast
// CLBLAST_CLBLAST_H_
diff --git a/samples/tuning_api.cpp b/samples/tuning_api.cpp
new file mode 100644
index 00000000..f92b6909
--- /dev/null
+++ b/samples/tuning_api.cpp
@@ -0,0 +1,77 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file demonstrates the use of the runtime tuning API. It is a stand-alone example, but it
+// does require the Khronos C++ OpenCL API header file (downloaded by CMake).
+//
+// =================================================================================================
+
+#include <cstdio>
+#include <chrono>
+#include <vector>
+
+#define CL_USE_DEPRECATED_OPENCL_1_1_APIS // to disable deprecation warnings
+#define CL_USE_DEPRECATED_OPENCL_1_2_APIS // to disable deprecation warnings
+
+// Includes the C++ OpenCL API. If not yet available, it can be found here:
+// https://www.khronos.org/registry/cl/api/1.1/cl.hpp
+#include "cl.hpp"
+
+// Includes the CLBlast library
+#include <clblast.h>
+
+// =================================================================================================
+
+int main() {
+
+ // OpenCL platform/device settings
+ const auto platform_id = 0;
+ const auto device_id = 0;
+
+ // Example arguments
+ const size_t m = 128;
+ const size_t n = 64;
+ const auto fraction = 1.0; // between 0.0 and 1.0
+
+ // Initializes the OpenCL platform
+ auto platforms = std::vector<cl::Platform>();
+ cl::Platform::get(&platforms);
+ if (platforms.size() == 0 || platform_id >= platforms.size()) { return 1; }
+ auto platform = platforms[platform_id];
+
+ // Initializes the OpenCL device
+ auto devices = std::vector<cl::Device>();
+ platform.getDevices(CL_DEVICE_TYPE_ALL, &devices);
+ if (devices.size() == 0 || device_id >= devices.size()) { return 1; }
+ auto device = devices[device_id];
+
+ // Creates the OpenCL context, queue, and an event
+ auto device_as_vector = std::vector<cl::Device>{device};
+ auto context = cl::Context(device_as_vector);
+ auto queue = cl::CommandQueue(context, device);
+
+ // Performs the tuning
+ printf("Starting the tuning...\n");
+ std::unordered_map<std::string,size_t> parameters;
+ auto queue_plain = queue();
+ auto status = clblast::TuneCopy<float>(&queue_plain, m, n, fraction, parameters);
+
+ // Tuning completed. See "clblast.h" for status codes (0 -> success).
+ printf("Completed TuneCopy with status %d (0 == OK), found parameters:\n", static_cast<int>(status));
+ for (const auto parameter: parameters) {
+ printf("> %s = %zu\n", parameter.first.c_str(), parameter.second);
+ }
+
+ // Set the new parameters
+ status = clblast::OverrideParameters(device(), "Copy", clblast::Precision::kSingle, parameters);
+ printf("Completed OverrideParameters with status %d (0 == OK)\n", static_cast<int>(status));
+ return 0;
+}
+
+// =================================================================================================
diff --git a/scripts/benchmark/utils.py b/scripts/benchmark/utils.py
index 62e18de2..11aad805 100644
--- a/scripts/benchmark/utils.py
+++ b/scripts/benchmark/utils.py
@@ -62,5 +62,8 @@ def parse_results(csv_data):
     results = [r for r in results]
     for result in results:
         for key in result:
-            result[key] = float(result[key]) if "." in result[key] else int(result[key])
+            if "i" in result[key]:
+                continue
+            else:
+                result[key] = float(result[key]) if "." in result[key] else int(result[key])
     return results
diff --git a/scripts/generator/generator.py b/scripts/generator/generator.py
index 02964c32..32420962 100755
--- a/scripts/generator/generator.py
+++ b/scripts/generator/generator.py
@@ -50,9 +50,9 @@ FILES = [
"/src/pyclblast/src/pyclblast.pyx"
]
HEADER_LINES = [123, 21, 127, 24, 29, 41, 29, 65, 32, 95, 21, 290]
-FOOTER_LINES = [41, 56, 112, 275, 6, 6, 6, 9, 2, 41, 55, 37]
+FOOTER_LINES = [98, 56, 112, 275, 6, 6, 6, 9, 2, 41, 55, 37]
HEADER_LINES_DOC = 0
-FOOTER_LINES_DOC = 158
+FOOTER_LINES_DOC = 232
# Different possibilities for requirements
ald_m = "The value of `a_ld` must be at least `m`."
diff --git a/src/database/apple_cpu_fallback.hpp b/src/database/apple_cpu_fallback.hpp
index e1aa4661..8d257b5e 100644
--- a/src/database/apple_cpu_fallback.hpp
+++ b/src/database/apple_cpu_fallback.hpp
@@ -41,7 +41,7 @@ const DatabaseEntry XgerApple = {
"Xger", Precision::kAny, {"WGS1", "WGS2", "WPT"}, { { kDeviceTypeAll, "default", { { "default", { { kDeviceNameDefault, Params{ 64, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } } } } }
};
const DatabaseEntry XtrsvApple = {
- "Xtrsv", Precision::kAny, {"TRSV_BLOCK_SIZE"}, { { kDeviceTypeAll, "default", { { "default", { { kDeviceNameDefault, Params{ 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } } } } }
+ "Xtrsv", Precision::kAny, {"TRSV_BLOCK_SIZE"}, { { kDeviceTypeAll, "default", { { "default", { { kDeviceNameDefault, Params{ 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } } } } }
};
const DatabaseEntry XgemmApple = {
"Xgemm", Precision::kAny, {"KWG", "KWI", "MDIMA", "MDIMC", "MWG", "NDIMB", "NDIMC", "NWG", "SA", "SB", "STRM", "STRN", "VWM", "VWN"}, { { kDeviceTypeAll, "default", { { "default", { { kDeviceNameDefault, Params{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1 } } } } } } }
diff --git a/src/kernels/level2/xtrsv.opencl b/src/kernels/level2/xtrsv.opencl
index ebea77a3..8777eb77 100644
--- a/src/kernels/level2/xtrsv.opencl
+++ b/src/kernels/level2/xtrsv.opencl
@@ -18,7 +18,7 @@ R"(
// =================================================================================================
#if defined(ROUTINE_TRSV)
-__kernel __attribute__((reqd_work_group_size(64, 1, 1)))
+__kernel __attribute__((reqd_work_group_size(16, 1, 1)))
void FillVector(const int n, const int inc, const int offset,
__global real* restrict dest, const real_arg arg_value) {
const real value = GetRealArg(arg_value);
diff --git a/src/kernels/level3/level3.opencl b/src/kernels/level3/level3.opencl
index 5ba8cf29..c67851df 100644
--- a/src/kernels/level3/level3.opencl
+++ b/src/kernels/level3/level3.opencl
@@ -76,7 +76,7 @@ R"(
// =================================================================================================
#if defined(ROUTINE_INVERT) || defined(ROUTINE_TRSM)
-__kernel __attribute__((reqd_work_group_size(8, 8, 1)))
+__kernel __attribute__((reqd_work_group_size(16, 1, 1)))
void FillMatrix(const int m, const int n, const int ld, const int offset,
__global real* restrict dest, const real_arg arg_value) {
const real value = GetRealArg(arg_value);
diff --git a/src/pyclblast/README.md b/src/pyclblast/README.md
index be37af01..2f6ebed7 100644
--- a/src/pyclblast/README.md
+++ b/src/pyclblast/README.md
@@ -29,3 +29,11 @@ After installation OpenCL and CLBlast, simply use pip to install PyCLBlast, e.g.
pip install --user pyclblast
To start using the library, browse the [CLBlast](https://github.com/CNugteren/CLBlast) documentation or check out the PyCLBlast samples provided in the `samples` subfolder.
+
+
+Testing PyCLBlast
+-------------
+
+The exhaustive tests are the main CLBlast test binaries. Apart from those, you can also run the PyCLBlast smoke tests from the `test` subfolder, e.g. as follows:
+
+ python -m unittest discover
diff --git a/src/pyclblast/samples/saxpy.py b/src/pyclblast/samples/saxpy.py
index 098e44d5..96b9f766 100644
--- a/src/pyclblast/samples/saxpy.py
+++ b/src/pyclblast/samples/saxpy.py
@@ -32,5 +32,6 @@ cly.set(y)
print("# Example level-1 operation: AXPY")
pyclblast.axpy(queue, n, clx, cly, alpha=alpha)
+queue.finish()
print("# Result for vector y: %s" % cly.get())
print("# Expected result: %s" % (alpha * x + y))
diff --git a/src/pyclblast/samples/sgemm.py b/src/pyclblast/samples/sgemm.py
index c872553f..c233cb6b 100644
--- a/src/pyclblast/samples/sgemm.py
+++ b/src/pyclblast/samples/sgemm.py
@@ -34,5 +34,6 @@ clc.set(c)
print("# Example level-3 operation: GEMM")
pyclblast.gemm(queue, m, n, k, cla, clb, clc, a_ld=k, b_ld=n, c_ld=n)
+queue.finish()
print("# Matrix C result: %s" % clc.get())
print("# Expected result: %s" % (np.dot(a, b)))
diff --git a/src/pyclblast/samples/sgemv.py b/src/pyclblast/samples/sgemv.py
index 196c838d..b7514dbd 100644
--- a/src/pyclblast/samples/sgemv.py
+++ b/src/pyclblast/samples/sgemv.py
@@ -36,5 +36,6 @@ cly.set(y)
print("# Example level-2 operation: GEMV")
pyclblast.gemv(queue, m, n, cla, clx, cly, a_ld=n, alpha=alpha, beta=beta)
+queue.finish()
print("# Result for vector y: %s" % cly.get())
print("# Expected result: %s" % (alpha * np.dot(a, x) + beta * y))
diff --git a/src/pyclblast/test/__init__.py b/src/pyclblast/test/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/pyclblast/test/__init__.py
diff --git a/src/pyclblast/test/test_pyclblast.py b/src/pyclblast/test/test_pyclblast.py
new file mode 100644
index 00000000..aa055d1a
--- /dev/null
+++ b/src/pyclblast/test/test_pyclblast.py
@@ -0,0 +1,81 @@
+
+####################################################################################################
+# This file is part of the CLBlast project. The project is licensed under Apache Version 2.0.
+#
+# Author(s):
+# Cedric Nugteren <www.cedricnugteren.nl>
+#
+# This file tests PyCLBlast: the Python interface to CLBlast. It is not exhaustive. For full
+# testing it is recommended to run the regular CLBlast tests; this is just a small smoke test.
+#
+####################################################################################################
+
+import unittest
+
+import numpy as np
+import pyopencl as cl
+from pyopencl.array import Array
+
+import pyclblast
+
+
+class TestPyCLBlast(unittest.TestCase):
+
+    @staticmethod
+    def setup(sizes, dtype):
+        ctx = cl.create_some_context()
+        queue = cl.CommandQueue(ctx)
+        host_arrays, device_arrays = [], []
+        for size in sizes:
+            numpy_array = np.random.rand(*size).astype(dtype=dtype)
+            opencl_array = Array(queue, numpy_array.shape, numpy_array.dtype)
+            opencl_array.set(numpy_array)
+            host_arrays.append(numpy_array)
+            device_arrays.append(opencl_array)
+        queue.finish()
+        return queue, host_arrays, device_arrays
+
+    def test_axpy(self):
+        for dtype in ["float32", "complex64"]:
+            for alpha in [1.0, 3.1]:
+                for n in [1, 7, 32]:
+                    queue, h, d = self.setup([(n,), (n,)], dtype=dtype)
+                    pyclblast.axpy(queue, n, d[0], d[1], alpha=alpha)
+                    queue.finish()
+                    result = d[1].get()
+                    reference = alpha * h[0] + h[1]
+                    for i in range(n):
+                        self.assertAlmostEqual(reference[i], result[i], places=3)
+
+    def test_gemv(self):
+        for dtype in ["float32", "complex64"]:
+            for beta in [1.0]:
+                for alpha in [1.0, 3.1]:
+                    for m in [1, 7, 32]:
+                        for n in [1, 7, 32]:
+                            queue, h, d = self.setup([(m, n), (n,), (m,)], dtype=dtype)
+                            pyclblast.gemv(queue, m, n, d[0], d[1], d[2],
+                                           a_ld=n, alpha=alpha, beta=beta)
+                            queue.finish()
+                            result = d[2].get()
+                            reference = alpha * np.dot(h[0], h[1]) + beta * h[2]
+                            for i in range(m):
+                                self.assertAlmostEqual(reference[i], result[i], places=3)
+
+    def test_gemm(self):
+        for dtype in ["float32", "complex64"]:
+            for beta in [1.0]:
+                for alpha in [1.0, 3.1]:
+                    for m in [1, 7, 32]:
+                        for n in [1, 7, 32]:
+                            for k in [1, 7, 32]:
+                                queue, h, d = self.setup([(m, k), (k, n), (m, n)], dtype=dtype)
+                                pyclblast.gemm(queue, m, n, k, d[0], d[1], d[2],
+                                               a_ld=k, b_ld=n, c_ld=n, alpha=alpha, beta=beta)
+                                queue.finish()
+                                result = d[2].get()
+                                reference = alpha * np.dot(h[0], h[1]) + beta * h[2]
+                                for i in range(m):
+                                    for j in range(n):
+                                        self.assertAlmostEqual(reference[i, j], result[i, j],
+                                                               places=3)
diff --git a/src/routines/common.cpp b/src/routines/common.cpp
index 5b178e53..a4d1f577 100644
--- a/src/routines/common.cpp
+++ b/src/routines/common.cpp
@@ -89,8 +89,8 @@ void FillMatrix(Queue &queue, const Device &device,
kernel.SetArgument(3, static_cast<int>(offset));
kernel.SetArgument(4, dest());
kernel.SetArgument(5, GetRealArg(constant_value));
- auto local = std::vector<size_t>{8, 8};
- auto global = std::vector<size_t>{Ceil(m, 8), Ceil(n, 8)};
+ auto local = std::vector<size_t>{16, 1};
+ auto global = std::vector<size_t>{Ceil(m, 16), n};
RunKernel(kernel, queue, device, global, local, event, waitForEvents);
}
@@ -125,8 +125,8 @@ void FillVector(Queue &queue, const Device &device,
kernel.SetArgument(2, static_cast<int>(offset));
kernel.SetArgument(3, dest());
kernel.SetArgument(4, GetRealArg(constant_value));
- auto local = std::vector<size_t>{64};
- auto global = std::vector<size_t>{Ceil(n, 64)};
+ auto local = std::vector<size_t>{16};
+ auto global = std::vector<size_t>{Ceil(n, 16)};
RunKernel(kernel, queue, device, global, local, event, waitForEvents);
}
diff --git a/src/tuning/configurations.cpp b/src/tuning/configurations.cpp
index 459d66b1..1fe232cf 100644
--- a/src/tuning/configurations.cpp
+++ b/src/tuning/configurations.cpp
@@ -21,11 +21,15 @@ namespace clblast {
// =================================================================================================
// Finds all configurations. It also applies the user-defined constraints within.
-std::vector<Configuration> SetConfigurations(const std::vector<Parameter> parameters,
- const Constraints& constraints) {
+std::vector<Configuration> SetConfigurations(const Device& device,
+ const std::vector<Parameter> parameters,
+ const Constraints& constraints,
+ const LocalMemSizeInfo& local_mem_size_info) {
+ const auto local_mem_max = device.LocalMemSize();
auto config = Configuration();
auto configurations = std::vector<Configuration>();
- PopulateConfigurations(parameters, 0, config, configurations, constraints);
+ PopulateConfigurations(parameters, 0, config, configurations,
+ local_mem_max, constraints, local_mem_size_info);
return configurations;
}
@@ -33,12 +37,14 @@ std::vector<Configuration> SetConfigurations(const std::vector<Parameter> parame
void PopulateConfigurations(const std::vector<Parameter> &parameters,
const size_t index, const Configuration &config,
std::vector<Configuration> &configuration,
- const Constraints& constraints) {
+ const size_t local_mem_max,
+ const Constraints& constraints,
+ const LocalMemSizeInfo& local_mem_size_info) {
// End of the chain: all parameters are considered, store the resulting configuration if it is a
// valid one according to the constraints
if (index == parameters.size()) {
- if (ValidConfiguration(config, constraints)) {
+ if (ValidConfiguration(config, local_mem_max, constraints, local_mem_size_info)) {
configuration.push_back(config);
}
return;
@@ -49,13 +55,16 @@ void PopulateConfigurations(const std::vector<Parameter> &parameters,
for (auto &value: parameter.second) {
auto config_copy = config;
config_copy[parameter.first] = value;
- PopulateConfigurations(parameters, index+1, config_copy, configuration, constraints);
+ PopulateConfigurations(parameters, index+1, config_copy, configuration,
+ local_mem_max, constraints, local_mem_size_info);
}
}
// Loops over all user-defined constraints to check whether or not the configuration is valid
bool ValidConfiguration(const Configuration &config,
- const Constraints& constraints) {
+ const size_t local_mem_max,
+ const Constraints& constraints,
+ const LocalMemSizeInfo& local_mem_size_info) {
// Iterates over all constraints
for (auto &constraint: constraints) {
@@ -72,6 +81,17 @@ bool ValidConfiguration(const Configuration &config,
}
}
+ // Finds the values of the local memory parameters
+ auto local_mem_values = std::vector<size_t>(local_mem_size_info.parameters.size());
+ for (auto i=size_t{0}; i<local_mem_size_info.parameters.size(); ++i) {
+ local_mem_values[i] = config.at(local_mem_size_info.parameters[i]);
+ }
+
+ // Checks the local memory size
+ if (local_mem_size_info.local_mem_size(local_mem_values) > local_mem_max) {
+ return false;
+ }
+
// Everything was OK: this configuration is valid
return true;
}
diff --git a/src/tuning/configurations.hpp b/src/tuning/configurations.hpp
index 74679ff6..faa5498f 100644
--- a/src/tuning/configurations.hpp
+++ b/src/tuning/configurations.hpp
@@ -37,12 +37,21 @@ struct Constraint {
};
using Constraints = std::vector<Constraint>;
+// As above, but for local memory size
+using LocalMemSizeFunction = std::function<size_t(std::vector<size_t>)>;
+struct LocalMemSizeInfo {
+ LocalMemSizeFunction local_mem_size;
+ std::vector<std::string> parameters;
+};
+
// =================================================================================================
// Initializes an empty configuration (vector of name/value pairs) and kicks-off the recursive
// function to find all configurations. It also applies the user-defined constraints within.
-std::vector<Configuration> SetConfigurations(const std::vector<Parameter> parameters,
- const Constraints& constraints);
+std::vector<Configuration> SetConfigurations(const Device& device,
+ const std::vector<Parameter> parameters,
+ const Constraints& constraints,
+ const LocalMemSizeInfo& local_mem_size_info);
// Iterates recursively over all permutations of the user-defined parameters. This code creates
// multiple chains, in which each chain selects a unique combination of values for all parameters.
@@ -51,14 +60,18 @@ std::vector<Configuration> SetConfigurations(const std::vector<Parameter> parame
void PopulateConfigurations(const std::vector<Parameter> &parameters,
const size_t index, const Configuration &config,
std::vector<Configuration> &configuration,
- const Constraints& constraints);
+ const size_t local_mem_max,
+ const Constraints& constraints,
+ const LocalMemSizeInfo& local_mem_size_info);
// Loops over all user-defined constraints to check whether or not the configuration is valid.
// Assumes initially all configurations are valid, then returns false if one of the constraints has
// not been met. Constraints consist of a user-defined function and a list of parameter names, which
// are replaced by parameter values in this function.
bool ValidConfiguration(const Configuration &config,
- const Constraints& constraints);
+ const size_t local_mem_max,
+ const Constraints& constraints,
+ const LocalMemSizeInfo& local_mem_size_info);
// Processes multipliers and dividers to obtain the final thread configuration
std::vector<size_t> SetThreadConfiguration(const Configuration& config,
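
To make the new constraint concrete: the tuner evaluates the `LocalMemSizeInfo` lambda added above with the current values of the listed parameters and discards any configuration whose result exceeds `device.LocalMemSize()`. A hypothetical example for a kernel that stages a TILE_DIMX x TILE_DIMY tile of floats in local memory could look as follows (the parameter names are invented for illustration, and the internal `src/tuning/configurations.hpp` header is assumed to be on the include path):

```
#include <vector>
#include "tuning/configurations.hpp"

// Hypothetical example: local memory usage of TILE_DIMX * TILE_DIMY single-precision values.
clblast::LocalMemSizeInfo ExampleLocalMemSize() {
  return {
      [](std::vector<size_t> v) -> size_t { return v[0] * v[1] * sizeof(float); },
      {"TILE_DIMX", "TILE_DIMY"}
  };
}
```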
diff --git a/src/tuning/kernels/copy_fast.cpp b/src/tuning/kernels/copy_fast.cpp
index d046c9e9..13f7ef3c 100644
--- a/src/tuning/kernels/copy_fast.cpp
+++ b/src/tuning/kernels/copy_fast.cpp
@@ -11,86 +11,7 @@
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgM, kArgN, kArgAlpha};
- settings.default_m = 1024;
- settings.default_n = 1024;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = "copy";
- settings.kernel_name = "CopyMatrixFast";
- settings.sources =
-#include "../src/kernels/level3/level3.opencl"
-#include "../src/kernels/level3/copy_fast.opencl"
- ;
-
- // Buffer sizes
- settings.size_a = args.m * args.n;
- settings.size_b = args.m * args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {2, 3};
- settings.outputs = {3};
-
- // Sets the base thread configuration
- settings.global_size = {args.m, args.n};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1, 1};
- settings.local_size_ref = {8, 8};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"COPY_DIMX", "COPY_DIMY"}};
- settings.div_global = {{"COPY_VW", "COPY_WPT"}};
-
- // Sets the tuning parameters and their possible values
- settings.parameters = {
- {"COPY_DIMX", {8, 16, 32}},
- {"COPY_DIMY", {8, 16, 32}},
- {"COPY_WPT", {1, 2, 4, 8}},
- {"COPY_VW", {1, 2, 4, 8}},
- };
-
- // Describes how to compute the performance metrics
- settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
- settings.performance_unit = "GB/s";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int) { return {}; }
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- kernel.SetArgument(0, static_cast<int>(args.m));
- kernel.SetArgument(1, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(2, buffers[3]()); // 3 == B matrix
- kernel.SetArgument(3, GetRealArg(args.alpha));
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/copy_fast.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -101,11 +22,11 @@ using double2 = clblast::double2;
int main(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::CopyGetTunerDefaults, clblast::CopyGetTunerSettings<half>, clblast::CopyTestValidArguments<half>, clblast::CopySetConstraints, clblast::CopyComputeLocalMemSize<half>, clblast::CopySetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::CopyGetTunerDefaults, clblast::CopyGetTunerSettings<float>, clblast::CopyTestValidArguments<float>, clblast::CopySetConstraints, clblast::CopyComputeLocalMemSize<float>, clblast::CopySetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::CopyGetTunerDefaults, clblast::CopyGetTunerSettings<double>, clblast::CopyTestValidArguments<double>, clblast::CopySetConstraints, clblast::CopyComputeLocalMemSize<double>, clblast::CopySetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::CopyGetTunerDefaults, clblast::CopyGetTunerSettings<float2>, clblast::CopyTestValidArguments<float2>, clblast::CopySetConstraints, clblast::CopyComputeLocalMemSize<float2>, clblast::CopySetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::CopyGetTunerDefaults, clblast::CopyGetTunerSettings<double2>, clblast::CopyTestValidArguments<double2>, clblast::CopySetConstraints, clblast::CopyComputeLocalMemSize<double2>, clblast::CopySetArguments<double2>); break;
}
return 0;
}
diff --git a/src/tuning/kernels/copy_fast.hpp b/src/tuning/kernels/copy_fast.hpp
new file mode 100644
index 00000000..1c4219ae
--- /dev/null
+++ b/src/tuning/kernels/copy_fast.hpp
@@ -0,0 +1,97 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the copy OpenCL kernels.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults CopyGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings CopyGetTunerSettings(const int, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "copy";
+ settings.kernel_name = "CopyMatrixFast";
+ settings.sources =
+#include "../src/kernels/level3/level3.opencl"
+#include "../src/kernels/level3/copy_fast.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.m * args.n;
+ settings.size_b = args.m * args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {2, 3};
+ settings.outputs = {3};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"COPY_DIMX", "COPY_DIMY"}};
+ settings.div_global = {{"COPY_VW", "COPY_WPT"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"COPY_DIMX", {8, 16, 32}},
+ {"COPY_DIMY", {8, 16, 32}},
+ {"COPY_WPT", {1, 2, 4, 8}},
+ {"COPY_VW", {1, 2, 4, 8}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void CopyTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> CopySetConstraints(const int) { return {}; }
+template <typename T>
+LocalMemSizeInfo CopyComputeLocalMemSize(const int) {
+ return { [] (std::vector<size_t>) -> size_t { return 0; }, {} };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void CopySetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(2, buffers[3]()); // 3 == B matrix
+ kernel.SetArgument(3, GetRealArg(args.alpha));
+}
+
+// =================================================================================================
+} // namespace clblast
diff --git a/src/tuning/kernels/copy_pad.cpp b/src/tuning/kernels/copy_pad.cpp
index 1b483e86..ffaed6ed 100644
--- a/src/tuning/kernels/copy_pad.cpp
+++ b/src/tuning/kernels/copy_pad.cpp
@@ -11,94 +11,7 @@
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgM, kArgN, kArgAlpha};
- settings.default_m = 1024;
- settings.default_n = 1024;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = "pad";
- settings.kernel_name = "CopyPadMatrix";
- settings.sources =
-#include "../src/kernels/level3/level3.opencl"
-#include "../src/kernels/level3/copy_pad.opencl"
- ;
-
- // Buffer sizes
- settings.size_a = args.m * args.n;
- settings.size_b = args.m * args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {2, 3};
- settings.outputs = {3};
-
- // Sets the base thread configuration
- settings.global_size = {args.m, args.n};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1, 1};
- settings.local_size_ref = {8, 8};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"PAD_DIMX", "PAD_DIMY"}};
- settings.div_global = {{"PAD_WPTX", "PAD_WPTY"}};
-
- // Sets the tuning parameters and their possible values
- settings.parameters = {
- {"PAD_DIMX", {8, 16, 32}},
- {"PAD_DIMY", {8, 16, 32}},
- {"PAD_WPTX", {1, 2, 4}},
- {"PAD_WPTY", {1, 2, 4}},
- };
-
- // Describes how to compute the performance metrics
- settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
- settings.performance_unit = "GB/s";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int) { return {}; }
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- kernel.SetArgument(0, static_cast<int>(args.m));
- kernel.SetArgument(1, static_cast<int>(args.n));
- kernel.SetArgument(2, static_cast<int>(args.m));
- kernel.SetArgument(3, 0);
- kernel.SetArgument(4, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(5, static_cast<int>(args.m));
- kernel.SetArgument(6, static_cast<int>(args.n));
- kernel.SetArgument(7, static_cast<int>(args.m));
- kernel.SetArgument(8, 0);
- kernel.SetArgument(9, buffers[3]()); // 3 == B matrix
- kernel.SetArgument(10, GetRealArg(args.alpha));
- kernel.SetArgument(11, 0);
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/copy_pad.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -109,11 +22,11 @@ using double2 = clblast::double2;
int main(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::PadGetTunerDefaults, clblast::PadGetTunerSettings<half>, clblast::PadTestValidArguments<half>, clblast::PadSetConstraints, clblast::PadComputeLocalMemSize<half>, clblast::PadSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::PadGetTunerDefaults, clblast::PadGetTunerSettings<float>, clblast::PadTestValidArguments<float>, clblast::PadSetConstraints, clblast::PadComputeLocalMemSize<float>, clblast::PadSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::PadGetTunerDefaults, clblast::PadGetTunerSettings<double>, clblast::PadTestValidArguments<double>, clblast::PadSetConstraints, clblast::PadComputeLocalMemSize<double>, clblast::PadSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::PadGetTunerDefaults, clblast::PadGetTunerSettings<float2>, clblast::PadTestValidArguments<float2>, clblast::PadSetConstraints, clblast::PadComputeLocalMemSize<float2>, clblast::PadSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::PadGetTunerDefaults, clblast::PadGetTunerSettings<double2>, clblast::PadTestValidArguments<double2>, clblast::PadSetConstraints, clblast::PadComputeLocalMemSize<double2>, clblast::PadSetArguments<double2>); break;
}
return 0;
}
diff --git a/src/tuning/kernels/copy_pad.hpp b/src/tuning/kernels/copy_pad.hpp
new file mode 100644
index 00000000..ada1cf83
--- /dev/null
+++ b/src/tuning/kernels/copy_pad.hpp
@@ -0,0 +1,105 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the pad OpenCL kernels.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults PadGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings PadGetTunerSettings(const int, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "pad";
+ settings.kernel_name = "CopyPadMatrix";
+ settings.sources =
+#include "../src/kernels/level3/level3.opencl"
+#include "../src/kernels/level3/copy_pad.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.m * args.n;
+ settings.size_b = args.m * args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {2, 3};
+ settings.outputs = {3};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"PAD_DIMX", "PAD_DIMY"}};
+ settings.div_global = {{"PAD_WPTX", "PAD_WPTY"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"PAD_DIMX", {8, 16, 32}},
+ {"PAD_DIMY", {8, 16, 32}},
+ {"PAD_WPTX", {1, 2, 4}},
+ {"PAD_WPTY", {1, 2, 4}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void PadTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> PadSetConstraints(const int) { return {}; }
+template <typename T>
+LocalMemSizeInfo PadComputeLocalMemSize(const int) {
+ return { [] (std::vector<size_t>) -> size_t { return 0; }, {} };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void PadSetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, static_cast<int>(args.n));
+ kernel.SetArgument(2, static_cast<int>(args.m));
+ kernel.SetArgument(3, 0);
+ kernel.SetArgument(4, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(5, static_cast<int>(args.m));
+ kernel.SetArgument(6, static_cast<int>(args.n));
+ kernel.SetArgument(7, static_cast<int>(args.m));
+ kernel.SetArgument(8, 0);
+ kernel.SetArgument(9, buffers[3]()); // 3 == B matrix
+ kernel.SetArgument(10, GetRealArg(args.alpha));
+ kernel.SetArgument(11, 0);
+}
+
+// =================================================================================================
+} // namespace clblast
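
The ComputeLocalMemSize hooks introduced in these headers pair a size function with the names of the tuning parameters it reads; for the pad kernel the function returns zero and the name list is empty, since the kernel uses no local memory. Below is a minimal, self-contained sketch of how a tuner could evaluate such a pair against a device limit; the LocalMemSizeSketch struct and FitsInLocalMemory helper are hypothetical stand-ins for illustration only, not part of the CLBlast API.

// Hypothetical sketch: evaluating a local-memory-size hook for one tuning configuration.
#include <cstddef>
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for the LocalMemSizeInfo pair used above: a size function plus the parameter names it reads.
struct LocalMemSizeSketch {
  std::function<size_t(std::vector<size_t>)> size_func;
  std::vector<std::string> parameter_names;
};

// Looks up the listed parameters in a configuration and checks the resulting size against a device limit.
bool FitsInLocalMemory(const LocalMemSizeSketch &info,
                       const std::map<std::string, size_t> &config,
                       const size_t device_local_mem_bytes) {
  auto values = std::vector<size_t>();
  for (const auto &name : info.parameter_names) { values.push_back(config.at(name)); }
  return info.size_func(values) <= device_local_mem_bytes;
}

int main() {
  // Mirrors PadComputeLocalMemSize above: no local memory, so any configuration fits.
  const auto pad_info = LocalMemSizeSketch{[](std::vector<size_t>) -> size_t { return 0; }, {}};
  std::cout << FitsInLocalMemory(pad_info, {{"PAD_DIMX", 32}, {"PAD_DIMY", 32}}, 32 * 1024) << "\n";
  return 0;
}
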
diff --git a/src/tuning/kernels/invert.cpp b/src/tuning/kernels/invert.cpp
index d846fdf7..3795da88 100644
--- a/src/tuning/kernels/invert.cpp
+++ b/src/tuning/kernels/invert.cpp
@@ -11,99 +11,7 @@
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgN, kArgM, kArgK};
- settings.default_n = 128; // dimension of input matrix 'n'
- settings.default_m = 64; // block size
- settings.default_k = 16; // current size
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = "invert";
- settings.kernel_name = "TripleMatMul16Part1Lower";
- settings.sources =
-"#define ROUTINE_INVERT"
-#include "../src/kernels/level3/invert_diagonal_blocks_part1.opencl"
-#include "../src/kernels/level3/invert_diagonal_blocks_part2.opencl"
- ;
-
- // Buffer sizes
- settings.size_a = args.n * args.n + args.a_offset;
- settings.size_b = Ceil(args.n, args.m) * args.m; // Ceil(n, block_size) * block_size
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {2, 3};
- settings.outputs = {3};
-
- // Sets the base thread configuration
- const auto num_pages = CeilDiv(args.n, args.k * 2); // CeilDiv(n, current_size*2)
- settings.global_size = {args.k / 4, num_pages * (args.k / 16) * 4};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1, 1};
- settings.local_size_ref = {4, 4};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"TMMWGSX", "TMMWGSY"}};
- settings.div_global = {{}};
-
- // Sets the tuning parameters and their possible values
- // TODO: Make these actually tunable, apart from LOCALPAD
- settings.parameters = {
- {"INTERNAL_BLOCK_SIZE", {16}},
- {"LOCALPAD", {0, 1}},
- {"TMMWGSX", {4}},
- {"TMMWGSY", {4}},
- };
-
- // Describes how to compute the performance metrics
- settings.metric_amount = 1 * GetBytes(args.precision);
- settings.performance_unit = "N/A";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &args) {
- if (!(args.k == 16)) {
- throw std::runtime_error("'TripleMatMul16Part1Lower' requires 'k' to be 16");
- }
-}
-std::vector<Constraint> SetConstraints(const int) { return {}; }
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- const auto num_pages = CeilDiv(args.n, args.k * 2); // CeilDiv(n, current_size*2)
- kernel.SetArgument(0, static_cast<int>(args.n)); // n
- kernel.SetArgument(1, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(2, 0); // a_offset
- kernel.SetArgument(3, static_cast<int>(args.n)); // a_ld
- kernel.SetArgument(4, buffers[3]()); // 3 == B matrix
- kernel.SetArgument(5, static_cast<int>(args.k)); // current_size
- kernel.SetArgument(6, static_cast<int>(num_pages)); // num_pages
- kernel.SetArgument(7, static_cast<int>(args.m)); // block_size
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/invert.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -114,11 +22,11 @@ using double2 = clblast::double2;
int main(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::InvertGetTunerDefaults, clblast::InvertGetTunerSettings<half>, clblast::InvertTestValidArguments<half>, clblast::InvertSetConstraints, clblast::InvertComputeLocalMemSize<half>, clblast::InvertSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::InvertGetTunerDefaults, clblast::InvertGetTunerSettings<float>, clblast::InvertTestValidArguments<float>, clblast::InvertSetConstraints, clblast::InvertComputeLocalMemSize<float>, clblast::InvertSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::InvertGetTunerDefaults, clblast::InvertGetTunerSettings<double>, clblast::InvertTestValidArguments<double>, clblast::InvertSetConstraints, clblast::InvertComputeLocalMemSize<double>, clblast::InvertSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::InvertGetTunerDefaults, clblast::InvertGetTunerSettings<float2>, clblast::InvertTestValidArguments<float2>, clblast::InvertSetConstraints, clblast::InvertComputeLocalMemSize<float2>, clblast::InvertSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::InvertGetTunerDefaults, clblast::InvertGetTunerSettings<double2>, clblast::InvertTestValidArguments<double2>, clblast::InvertSetConstraints, clblast::InvertComputeLocalMemSize<double2>, clblast::InvertSetArguments<double2>); break;
}
return 0;
}
diff --git a/src/tuning/kernels/invert.hpp b/src/tuning/kernels/invert.hpp
new file mode 100644
index 00000000..4f74674d
--- /dev/null
+++ b/src/tuning/kernels/invert.hpp
@@ -0,0 +1,115 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the invert OpenCL kernels.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults InvertGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgN, kArgM, kArgK};
+ settings.default_n = 128; // dimension of input matrix 'n'
+ settings.default_m = 64; // block size
+ settings.default_k = 16; // current size
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings InvertGetTunerSettings(const int, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "invert";
+ settings.kernel_name = "TripleMatMul16Part1Lower";
+ settings.sources =
+"#define ROUTINE_INVERT"
+#include "../src/kernels/level3/invert_diagonal_blocks_part1.opencl"
+#include "../src/kernels/level3/invert_diagonal_blocks_part2.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.n * args.n + args.a_offset;
+ settings.size_b = Ceil(args.n, args.m) * args.m; // Ceil(n, block_size) * block_size
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {2, 3};
+ settings.outputs = {3};
+
+ // Sets the base thread configuration
+ const auto num_pages = CeilDiv(args.n, args.k * 2); // CeilDiv(n, current_size*2)
+ settings.global_size = {args.k / 4, num_pages * (args.k / 16) * 4};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {4, 4};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"TMMWGSX", "TMMWGSY"}};
+ settings.div_global = {{}};
+
+ // Sets the tuning parameters and their possible values
+ // TODO: Make these actually tunable, apart from LOCALPAD
+ settings.parameters = {
+ {"INTERNAL_BLOCK_SIZE", {16}},
+ {"LOCALPAD", {0, 1}},
+ {"TMMWGSX", {4}},
+ {"TMMWGSY", {4}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 1 * GetBytes(args.precision);
+ settings.performance_unit = "N/A";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void InvertTestValidArguments(const int, const Arguments<T> &args) {
+ if (!(args.k == 16)) {
+ throw std::runtime_error("'TripleMatMul16Part1Lower' requires 'k' to be 16");
+ }
+}
+std::vector<Constraint> InvertSetConstraints(const int) { return {}; }
+template <typename T>
+LocalMemSizeInfo InvertComputeLocalMemSize(const int) {
+ return {
+ [] (std::vector<size_t> v) -> size_t {
+ return GetBytes(PrecisionValue<T>()) * (16 + v[0]) * 16;
+ },
+ {"LOCALPAD"}
+ };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void InvertSetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ const auto num_pages = CeilDiv(args.n, args.k * 2); // CeilDiv(n, current_size*2)
+ kernel.SetArgument(0, static_cast<int>(args.n)); // n
+ kernel.SetArgument(1, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(2, 0); // a_offset
+ kernel.SetArgument(3, static_cast<int>(args.n)); // a_ld
+ kernel.SetArgument(4, buffers[3]()); // 3 == B matrix
+ kernel.SetArgument(5, static_cast<int>(args.k)); // current_size
+ kernel.SetArgument(6, static_cast<int>(num_pages)); // num_pages
+ kernel.SetArgument(7, static_cast<int>(args.m)); // block_size
+}
+
+// =================================================================================================
+} // namespace clblast
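
The local-memory formula above evaluates to GetBytes(precision) * (16 + LOCALPAD) * 16, matching the single parameter name in its list. A small stand-alone check of that arithmetic, assuming single precision (4 bytes per element) as a stand-in for GetBytes(PrecisionValue<T>()), is sketched below.

// Hypothetical sanity check of InvertComputeLocalMemSize for single precision.
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  const size_t bytes_per_element = 4;  // sizeof(float), stand-in for GetBytes(PrecisionValue<float>())
  auto invert_local_mem = [&](std::vector<size_t> v) -> size_t {
    return bytes_per_element * (16 + v[0]) * 16;  // v[0] == LOCALPAD, matching the name list above
  };
  assert(invert_local_mem({0}) == 1024);  // 16 x 16 block of floats
  assert(invert_local_mem({1}) == 1088);  // LOCALPAD == 1 adds one extra column of 16 floats
  return 0;
}
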
diff --git a/src/tuning/kernels/transpose_fast.cpp b/src/tuning/kernels/transpose_fast.cpp
index 5b701a5b..024f7385 100644
--- a/src/tuning/kernels/transpose_fast.cpp
+++ b/src/tuning/kernels/transpose_fast.cpp
@@ -11,86 +11,7 @@
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgM, kArgN, kArgAlpha};
- settings.default_m = 1024;
- settings.default_n = 1024;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = "transpose";
- settings.kernel_name = "TransposeMatrixFast";
- settings.sources =
-#include "../src/kernels/level3/level3.opencl"
-#include "../src/kernels/level3/transpose_fast.opencl"
- ;
-
- // Buffer sizes
- settings.size_a = args.m * args.n;
- settings.size_b = args.m * args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {2, 3};
- settings.outputs = {3};
-
- // Sets the base thread configuration
- settings.global_size = {args.m, args.n};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1, 1};
- settings.local_size_ref = {8, 8};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"TRA_DIM", "TRA_DIM"}};
- settings.div_global = {{"TRA_WPT", "TRA_WPT"}};
-
- // Sets the tuning parameters and their possible values
- settings.parameters = {
- {"TRA_DIM", {4, 8, 16, 32, 64}},
- {"TRA_WPT", {1, 2, 4, 8, 16}},
- {"TRA_PAD", {0, 1}},
- {"TRA_SHUFFLE", {0, 1}},
- };
-
- // Describes how to compute the performance metrics
- settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
- settings.performance_unit = "GB/s";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int) { return {}; }
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- kernel.SetArgument(0, static_cast<int>(args.m));
- kernel.SetArgument(1, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(2, buffers[3]()); // 3 == B matrix
- kernel.SetArgument(3, GetRealArg(args.alpha));
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/transpose_fast.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -101,11 +22,11 @@ using double2 = clblast::double2;
int main(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::TransposeGetTunerDefaults, clblast::TransposeGetTunerSettings<half>, clblast::TransposeTestValidArguments<half>, clblast::TransposeSetConstraints, clblast::TransposeComputeLocalMemSize<half>, clblast::TransposeSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::TransposeGetTunerDefaults, clblast::TransposeGetTunerSettings<float>, clblast::TransposeTestValidArguments<float>, clblast::TransposeSetConstraints, clblast::TransposeComputeLocalMemSize<float>, clblast::TransposeSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::TransposeGetTunerDefaults, clblast::TransposeGetTunerSettings<double>, clblast::TransposeTestValidArguments<double>, clblast::TransposeSetConstraints, clblast::TransposeComputeLocalMemSize<double>, clblast::TransposeSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::TransposeGetTunerDefaults, clblast::TransposeGetTunerSettings<float2>, clblast::TransposeTestValidArguments<float2>, clblast::TransposeSetConstraints, clblast::TransposeComputeLocalMemSize<float2>, clblast::TransposeSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::TransposeGetTunerDefaults, clblast::TransposeGetTunerSettings<double2>, clblast::TransposeTestValidArguments<double2>, clblast::TransposeSetConstraints, clblast::TransposeComputeLocalMemSize<double2>, clblast::TransposeSetArguments<double2>); break;
}
return 0;
}
diff --git a/src/tuning/kernels/transpose_fast.hpp b/src/tuning/kernels/transpose_fast.hpp
new file mode 100644
index 00000000..c6e3f98d
--- /dev/null
+++ b/src/tuning/kernels/transpose_fast.hpp
@@ -0,0 +1,102 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the transpose OpenCL kernels.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults TransposeGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings TransposeGetTunerSettings(const int, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "transpose";
+ settings.kernel_name = "TransposeMatrixFast";
+ settings.sources =
+#include "../src/kernels/level3/level3.opencl"
+#include "../src/kernels/level3/transpose_fast.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.m * args.n;
+ settings.size_b = args.m * args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {2, 3};
+ settings.outputs = {3};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"TRA_DIM", "TRA_DIM"}};
+ settings.div_global = {{"TRA_WPT", "TRA_WPT"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"TRA_DIM", {4, 8, 16, 32, 64}},
+ {"TRA_WPT", {1, 2, 4, 8, 16}},
+ {"TRA_PAD", {0, 1}},
+ {"TRA_SHUFFLE", {0, 1}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void TransposeTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> TransposeSetConstraints(const int) { return {}; }
+template <typename T>
+LocalMemSizeInfo TransposeComputeLocalMemSize(const int) {
+ return {
+ [] (std::vector<size_t> v) -> size_t {
+ return GetBytes(PrecisionValue<T>()) * v[1] * (v[1] * v[0]) * (v[0] + v[2]);
+ },
+ {"TRA_DIM", "TRA_WPT", "TRA_PAD"}
+ };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void TransposeSetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(2, buffers[3]()); // 3 == B matrix
+ kernel.SetArgument(3, GetRealArg(args.alpha));
+}
+
+// =================================================================================================
+} // namespace clblast
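
With the parameter order {TRA_DIM, TRA_WPT, TRA_PAD} declared above, the formula reads GetBytes(precision) * TRA_WPT * (TRA_WPT * TRA_DIM) * (TRA_DIM + TRA_PAD). A small stand-alone check follows, again assuming single precision as a stand-in for GetBytes(PrecisionValue<T>()).

// Hypothetical sanity check of TransposeComputeLocalMemSize for single precision.
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  const size_t bytes_per_element = 4;  // stand-in for GetBytes(PrecisionValue<float>())
  // v == {TRA_DIM, TRA_WPT, TRA_PAD}, matching the parameter-name order above.
  auto transpose_local_mem = [&](std::vector<size_t> v) -> size_t {
    return bytes_per_element * v[1] * (v[1] * v[0]) * (v[0] + v[2]);
  };
  assert(transpose_local_mem({8, 2, 0}) == 1024);  // 4 * 2 * 16 * 8
  assert(transpose_local_mem({8, 2, 1}) == 1152);  // TRA_PAD == 1 adds one padding element per tile row
  // The largest configurations exceed a typical 32 KiB or 48 KiB limit, which is
  // what the re-added local-memory constraint is there to filter out:
  assert(transpose_local_mem({64, 16, 0}) > 48 * 1024);
  return 0;
}
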
diff --git a/src/tuning/kernels/transpose_pad.cpp b/src/tuning/kernels/transpose_pad.cpp
index ed24fb04..ffaa252b 100644
--- a/src/tuning/kernels/transpose_pad.cpp
+++ b/src/tuning/kernels/transpose_pad.cpp
@@ -11,93 +11,7 @@
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgM, kArgN, kArgAlpha};
- settings.default_m = 1024;
- settings.default_n = 1024;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = "padtranspose";
- settings.kernel_name = "TransposePadMatrix";
- settings.sources =
-#include "../src/kernels/level3/level3.opencl"
-#include "../src/kernels/level3/transpose_pad.opencl"
- ;
-
- // Buffer sizes
- settings.size_a = args.m * args.n;
- settings.size_b = args.m * args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {2, 3};
- settings.outputs = {3};
-
- // Sets the base thread configuration
- settings.global_size = {args.m, args.n};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1, 1};
- settings.local_size_ref = {8, 8};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"PADTRA_TILE", "PADTRA_TILE"}};
- settings.div_global = {{"PADTRA_WPT", "PADTRA_WPT"}};
-
- // Sets the tuning parameters and their possible values
- settings.parameters = {
- {"PADTRA_TILE", {8, 16, 32, 64}},
- {"PADTRA_WPT", {1, 2, 4, 8, 16}},
- {"PADTRA_PAD", {0, 1}},
- };
-
- // Describes how to compute the performance metrics
- settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
- settings.performance_unit = "GB/s";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int) { return {}; }
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- kernel.SetArgument(0, static_cast<int>(args.m));
- kernel.SetArgument(1, static_cast<int>(args.n));
- kernel.SetArgument(2, static_cast<int>(args.m));
- kernel.SetArgument(3, 0);
- kernel.SetArgument(4, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(5, static_cast<int>(args.n));
- kernel.SetArgument(6, static_cast<int>(args.m));
- kernel.SetArgument(7, static_cast<int>(args.n));
- kernel.SetArgument(8, 0);
- kernel.SetArgument(9, buffers[3]()); // 3 == B matrix
- kernel.SetArgument(10, GetRealArg(args.alpha));
- kernel.SetArgument(11, 0);
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/transpose_pad.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -108,11 +22,11 @@ using double2 = clblast::double2;
int main(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::PadtransposeGetTunerDefaults, clblast::PadtransposeGetTunerSettings<half>, clblast::PadtransposeTestValidArguments<half>, clblast::PadtransposeSetConstraints, clblast::PadtransposeComputeLocalMemSize<half>, clblast::PadtransposeSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::PadtransposeGetTunerDefaults, clblast::PadtransposeGetTunerSettings<float>, clblast::PadtransposeTestValidArguments<float>, clblast::PadtransposeSetConstraints, clblast::PadtransposeComputeLocalMemSize<float>, clblast::PadtransposeSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::PadtransposeGetTunerDefaults, clblast::PadtransposeGetTunerSettings<double>, clblast::PadtransposeTestValidArguments<double>, clblast::PadtransposeSetConstraints, clblast::PadtransposeComputeLocalMemSize<double>, clblast::PadtransposeSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::PadtransposeGetTunerDefaults, clblast::PadtransposeGetTunerSettings<float2>, clblast::PadtransposeTestValidArguments<float2>, clblast::PadtransposeSetConstraints, clblast::PadtransposeComputeLocalMemSize<float2>, clblast::PadtransposeSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::PadtransposeGetTunerDefaults, clblast::PadtransposeGetTunerSettings<double2>, clblast::PadtransposeTestValidArguments<double2>, clblast::PadtransposeSetConstraints, clblast::PadtransposeComputeLocalMemSize<double2>, clblast::PadtransposeSetArguments<double2>); break;
}
return 0;
}
diff --git a/src/tuning/kernels/transpose_pad.hpp b/src/tuning/kernels/transpose_pad.hpp
new file mode 100644
index 00000000..ebc0e4fb
--- /dev/null
+++ b/src/tuning/kernels/transpose_pad.hpp
@@ -0,0 +1,109 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the pad-transpose OpenCL kernels.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults PadtransposeGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings PadtransposeGetTunerSettings(const int, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "padtranspose";
+ settings.kernel_name = "TransposePadMatrix";
+ settings.sources =
+#include "../src/kernels/level3/level3.opencl"
+#include "../src/kernels/level3/transpose_pad.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.m * args.n;
+ settings.size_b = args.m * args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {2, 3};
+ settings.outputs = {3};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"PADTRA_TILE", "PADTRA_TILE"}};
+ settings.div_global = {{"PADTRA_WPT", "PADTRA_WPT"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"PADTRA_TILE", {8, 16, 32, 64}},
+ {"PADTRA_WPT", {1, 2, 4, 8, 16}},
+ {"PADTRA_PAD", {0, 1}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void PadtransposeTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> PadtransposeSetConstraints(const int) { return {}; }
+template <typename T>
+LocalMemSizeInfo PadtransposeComputeLocalMemSize(const int) {
+ return {
+ [] (std::vector<size_t> v) -> size_t {
+ return GetBytes(PrecisionValue<T>()) * (v[1] * v[0]) * (v[1] * v[0] + v[2]);
+ },
+ {"PADTRA_TILE", "PADTRA_WPT", "PADTRA_PAD"}
+ };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void PadtransposeSetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, static_cast<int>(args.n));
+ kernel.SetArgument(2, static_cast<int>(args.m));
+ kernel.SetArgument(3, 0);
+ kernel.SetArgument(4, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(5, static_cast<int>(args.n));
+ kernel.SetArgument(6, static_cast<int>(args.m));
+ kernel.SetArgument(7, static_cast<int>(args.n));
+ kernel.SetArgument(8, 0);
+ kernel.SetArgument(9, buffers[3]()); // 3 == B matrix
+ kernel.SetArgument(10, GetRealArg(args.alpha));
+ kernel.SetArgument(11, 0);
+}
+
+// =================================================================================================
+} // namespace clblast
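
Here the parameter order is {PADTRA_TILE, PADTRA_WPT, PADTRA_PAD}, so the formula amounts to GetBytes(precision) * (PADTRA_WPT * PADTRA_TILE) * (PADTRA_WPT * PADTRA_TILE + PADTRA_PAD). A short worked check under the same single-precision assumption:

// Hypothetical sanity check of PadtransposeComputeLocalMemSize for single precision.
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  const size_t bytes_per_element = 4;  // stand-in for GetBytes(PrecisionValue<float>())
  // v == {PADTRA_TILE, PADTRA_WPT, PADTRA_PAD}, matching the parameter-name order above.
  auto padtranspose_local_mem = [&](std::vector<size_t> v) -> size_t {
    return bytes_per_element * (v[1] * v[0]) * (v[1] * v[0] + v[2]);
  };
  assert(padtranspose_local_mem({8, 1, 0}) == 256);    // an 8 x 8 tile of floats
  assert(padtranspose_local_mem({16, 2, 1}) == 4224);  // a 32 x 33 tile: 32 x 32 plus a padding column
  return 0;
}
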
diff --git a/src/tuning/kernels/xaxpy.cpp b/src/tuning/kernels/xaxpy.cpp
index dd44018c..681876ea 100644
--- a/src/tuning/kernels/xaxpy.cpp
+++ b/src/tuning/kernels/xaxpy.cpp
@@ -11,88 +11,7 @@
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgN, kArgAlpha};
- settings.default_n = 4096*1024;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = "xaxpy";
- settings.kernel_name = "XaxpyFastest";
- settings.sources =
-#include "../src/kernels/level1/level1.opencl"
-#include "../src/kernels/level1/xaxpy.opencl"
- ;
-
- // Buffer sizes
- settings.size_x = args.n;
- settings.size_y = args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {0, 1};
- settings.outputs = {1};
-
- // Sets the base thread configuration
- settings.global_size = {args.n};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1};
- settings.local_size_ref = {64};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"WGS"}};
- settings.div_global = {{"WPT"},{"VW"}};
-
- // Sets the tuning parameters and their possible values
- settings.parameters = {
- {"WGS", {64, 128, 256, 512, 1024, 2048}},
- {"WPT", {1, 2, 4, 8}},
- {"VW", {1, 2, 4, 8}},
- };
-
- // Describes how to compute the performance metrics
- settings.metric_amount = 3 * args.n * GetBytes(args.precision);
- settings.performance_unit = "GB/s";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &args) {
- if (!IsMultiple(args.n, 64)) {
- throw std::runtime_error("'XaxpyFastest' requires 'n' to be a multiple of WGS*WPT*VW");
- }
-}
-std::vector<Constraint> SetConstraints(const int) { return {}; }
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- kernel.SetArgument(0, static_cast<int>(args.n));
- kernel.SetArgument(1, GetRealArg(args.alpha));
- kernel.SetArgument(2, buffers[0]()); // 0 == X vector
- kernel.SetArgument(3, buffers[1]()); // 1 == Y vector
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/xaxpy.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -103,11 +22,11 @@ using double2 = clblast::double2;
int main(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::XaxpyGetTunerDefaults, clblast::XaxpyGetTunerSettings<half>, clblast::XaxpyTestValidArguments<half>, clblast::XaxpySetConstraints, clblast::XaxpyComputeLocalMemSize<half>, clblast::XaxpySetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::XaxpyGetTunerDefaults, clblast::XaxpyGetTunerSettings<float>, clblast::XaxpyTestValidArguments<float>, clblast::XaxpySetConstraints, clblast::XaxpyComputeLocalMemSize<float>, clblast::XaxpySetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::XaxpyGetTunerDefaults, clblast::XaxpyGetTunerSettings<double>, clblast::XaxpyTestValidArguments<double>, clblast::XaxpySetConstraints, clblast::XaxpyComputeLocalMemSize<double>, clblast::XaxpySetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::XaxpyGetTunerDefaults, clblast::XaxpyGetTunerSettings<float2>, clblast::XaxpyTestValidArguments<float2>, clblast::XaxpySetConstraints, clblast::XaxpyComputeLocalMemSize<float2>, clblast::XaxpySetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::XaxpyGetTunerDefaults, clblast::XaxpyGetTunerSettings<double2>, clblast::XaxpyTestValidArguments<double2>, clblast::XaxpySetConstraints, clblast::XaxpyComputeLocalMemSize<double2>, clblast::XaxpySetArguments<double2>); break;
}
return 0;
}
diff --git a/src/tuning/kernels/xaxpy.hpp b/src/tuning/kernels/xaxpy.hpp
new file mode 100644
index 00000000..ab2c45f0
--- /dev/null
+++ b/src/tuning/kernels/xaxpy.hpp
@@ -0,0 +1,99 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the xaxpy OpenCL kernels.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults XaxpyGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgN, kArgAlpha};
+ settings.default_n = 4096*1024;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings XaxpyGetTunerSettings(const int, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "xaxpy";
+ settings.kernel_name = "XaxpyFastest";
+ settings.sources =
+#include "../src/kernels/level1/level1.opencl"
+#include "../src/kernels/level1/xaxpy.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_x = args.n;
+ settings.size_y = args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {0, 1};
+ settings.outputs = {1};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1};
+ settings.local_size_ref = {64};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"WGS"}};
+ settings.div_global = {{"WPT"},{"VW"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"WGS", {64, 128, 256, 512, 1024, 2048}},
+ {"WPT", {1, 2, 4, 8}},
+ {"VW", {1, 2, 4, 8}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 3 * args.n * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void XaxpyTestValidArguments(const int, const Arguments<T> &args) {
+ if (!IsMultiple(args.n, 64)) {
+ throw std::runtime_error("'XaxpyFastest' requires 'n' to be a multiple of WGS*WPT*VW");
+ }
+}
+std::vector<Constraint> XaxpySetConstraints(const int) { return {}; }
+template <typename T>
+LocalMemSizeInfo XaxpyComputeLocalMemSize(const int) {
+ return { [] (std::vector<size_t>) -> size_t { return 0; }, {} };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void XaxpySetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.n));
+ kernel.SetArgument(1, GetRealArg(args.alpha));
+ kernel.SetArgument(2, buffers[0]()); // 0 == X vector
+ kernel.SetArgument(3, buffers[1]()); // 1 == Y vector
+}
+
+// =================================================================================================
+} // namespace clblast
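
The argument check above only tests divisibility by 64, but, as its error message states, the underlying requirement is that n is a multiple of WGS*WPT*VW for whichever configuration is compiled. The default n of 4096*1024 satisfies this for every combination in the parameter lists above, since all values are powers of two; below is a small stand-alone verification of that claim (a hypothetical check, not CLBlast code).

// Hypothetical check that the default n divides evenly for every Xaxpy tuning configuration.
#include <cassert>
#include <cstddef>
#include <initializer_list>

int main() {
  const size_t n = 4096 * 1024;
  for (size_t wgs : {64, 128, 256, 512, 1024, 2048}) {
    for (size_t wpt : {1, 2, 4, 8}) {
      for (size_t vw : {1, 2, 4, 8}) {
        assert(n % (wgs * wpt * vw) == 0);  // the global size stays an exact multiple of each work-group
      }
    }
  }
  return 0;
}
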
diff --git a/src/tuning/kernels/xdot.cpp b/src/tuning/kernels/xdot.cpp
index 635d012a..a481f23b 100644
--- a/src/tuning/kernels/xdot.cpp
+++ b/src/tuning/kernels/xdot.cpp
@@ -12,94 +12,7 @@
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgN};
- settings.default_n = 2*1024*1024;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int V, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = "xdot_"+std::to_string(V);
- settings.kernel_name = (V==1) ? "Xdot" : "XdotEpilogue";
- settings.sources =
-#include "../src/kernels/level1/xdot.opencl"
- ;
-
- // Buffer sizes
- settings.size_x = args.n;
- settings.size_y = args.n;
- settings.size_temp = args.n; // Worst case
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {0, 1, 5};
- settings.outputs = {}; // no output checking
-
- // Sets the base thread configuration
- settings.global_size = (V==1) ? std::vector<size_t>{2*64} : std::vector<size_t>{1};
- settings.global_size_ref = (V==1) ? std::vector<size_t>{2*64*64} : std::vector<size_t>{64};
- settings.local_size = {1};
- settings.local_size_ref = {64};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = (V==1) ? TransformVector{{"WGS1"}} : TransformVector{{"WGS2"}};
- settings.mul_global = (V==1) ? TransformVector{{"WGS1"}} : TransformVector{{"WGS2"}};
-
- // Sets the tuning parameters and their possible values
- settings.parameters = {
- {"WGS"+std::to_string(V), {32, 64, 128, 256, 512, 1024}},
- };
-
- // Describes how to compute the performance metrics
- settings.metric_amount = (V==1) ? (2*args.n + 1) * GetBytes(args.precision) : 1 * GetBytes(args.precision);
- settings.performance_unit = (V==1) ? "GB/s" : "N/A";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int) { return {}; }
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int V, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- if (V == 1) {
- kernel.SetArgument(0, static_cast<int>(args.n));
- kernel.SetArgument(1, buffers[0]()); // 0 == X vector
- kernel.SetArgument(2, 0);
- kernel.SetArgument(3, 1);
- kernel.SetArgument(4, buffers[1]()); // 1 == Y vector
- kernel.SetArgument(5, 0);
- kernel.SetArgument(6, 1);
- kernel.SetArgument(7, buffers[5]()); // 5 == temp; no output checking - size varies
- kernel.SetArgument(8, static_cast<int>(false));
- }
- else {
- kernel.SetArgument(0, buffers[5]()); // 5 == temp
- kernel.SetArgument(1, buffers[0]()); // 0 == X vector; no output checking - size varies
- kernel.SetArgument(2, 0);
- }
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/xdot.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -111,11 +24,11 @@ template <int V>
void StartVariation(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::XdotGetTunerDefaults, clblast::XdotGetTunerSettings<half>, clblast::XdotTestValidArguments<half>, clblast::XdotSetConstraints, clblast::XdotComputeLocalMemSize<half>, clblast::XdotSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::XdotGetTunerDefaults, clblast::XdotGetTunerSettings<float>, clblast::XdotTestValidArguments<float>, clblast::XdotSetConstraints, clblast::XdotComputeLocalMemSize<float>, clblast::XdotSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::XdotGetTunerDefaults, clblast::XdotGetTunerSettings<double>, clblast::XdotTestValidArguments<double>, clblast::XdotSetConstraints, clblast::XdotComputeLocalMemSize<double>, clblast::XdotSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, V, clblast::XdotGetTunerDefaults, clblast::XdotGetTunerSettings<float2>, clblast::XdotTestValidArguments<float2>, clblast::XdotSetConstraints, clblast::XdotComputeLocalMemSize<float2>, clblast::XdotSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, V, clblast::XdotGetTunerDefaults, clblast::XdotGetTunerSettings<double2>, clblast::XdotTestValidArguments<double2>, clblast::XdotSetConstraints, clblast::XdotComputeLocalMemSize<double2>, clblast::XdotSetArguments<double2>); break;
}
}
diff --git a/src/tuning/kernels/xdot.hpp b/src/tuning/kernels/xdot.hpp
new file mode 100644
index 00000000..901d8fd0
--- /dev/null
+++ b/src/tuning/kernels/xdot.hpp
@@ -0,0 +1,110 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the xdot OpenCL kernels. Note that the results are
+// not verified, since the result is not final and depends on the WGS2 parameter.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults XdotGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgN};
+ settings.default_n = 2*1024*1024;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings XdotGetTunerSettings(const int V, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "xdot_"+std::to_string(V);
+ settings.kernel_name = (V==1) ? "Xdot" : "XdotEpilogue";
+ settings.sources =
+#include "../src/kernels/level1/xdot.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_x = args.n;
+ settings.size_y = args.n;
+ settings.size_temp = args.n; // Worst case
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {0, 1, 5};
+ settings.outputs = {}; // no output checking
+
+ // Sets the base thread configuration
+ settings.global_size = (V==1) ? std::vector<size_t>{2*64} : std::vector<size_t>{1};
+ settings.global_size_ref = (V==1) ? std::vector<size_t>{2*64*64} : std::vector<size_t>{64};
+ settings.local_size = {1};
+ settings.local_size_ref = {64};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = (V==1) ? TransformVector{{"WGS1"}} : TransformVector{{"WGS2"}};
+ settings.mul_global = (V==1) ? TransformVector{{"WGS1"}} : TransformVector{{"WGS2"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"WGS"+std::to_string(V), {32, 64, 128, 256, 512, 1024}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = (V==1) ? (2*args.n + 1) * GetBytes(args.precision) : 1 * GetBytes(args.precision);
+ settings.performance_unit = (V==1) ? "GB/s" : "N/A";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void XdotTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> XdotSetConstraints(const int) { return {}; }
+template <typename T>
+LocalMemSizeInfo XdotComputeLocalMemSize(const int V) {
+ return {
+ [] (std::vector<size_t> v) -> size_t {
+ return GetBytes(PrecisionValue<T>()) * v[0];
+ },
+ {"WGS"+std::to_string(V)}
+ };}
+
+// Sets the kernel's arguments
+template <typename T>
+void XdotSetArguments(const int V, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ if (V == 1) {
+ kernel.SetArgument(0, static_cast<int>(args.n));
+ kernel.SetArgument(1, buffers[0]()); // 0 == X vector
+ kernel.SetArgument(2, 0);
+ kernel.SetArgument(3, 1);
+ kernel.SetArgument(4, buffers[1]()); // 1 == Y vector
+ kernel.SetArgument(5, 0);
+ kernel.SetArgument(6, 1);
+ kernel.SetArgument(7, buffers[5]()); // 5 == temp; no output checking - size varies
+ kernel.SetArgument(8, static_cast<int>(false));
+ }
+ else {
+ kernel.SetArgument(0, buffers[5]()); // 5 == temp
+ kernel.SetArgument(1, buffers[0]()); // 0 == X vector; no output checking - size varies
+ kernel.SetArgument(2, 0);
+ }
+}
+
+// =================================================================================================
+} // namespace clblast
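
The Xdot local-memory hook charges one element per work-item of whichever work-group-size parameter the variation uses (WGS1 or WGS2). A short check under assumed element sizes (4 bytes for float, 16 bytes for double2); the lambda below is a stand-in, not CLBlast code.

// Hypothetical evaluation of XdotComputeLocalMemSize: one accumulator per work-item.
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  auto xdot_local_mem = [](size_t bytes_per_element, std::vector<size_t> v) -> size_t {
    return bytes_per_element * v[0];  // v[0] == WGS1 or WGS2, depending on the kernel variation V
  };
  assert(xdot_local_mem(4, {1024}) == 4 * 1024);    // float with WGS == 1024: 4 KiB
  assert(xdot_local_mem(16, {1024}) == 16 * 1024);  // double2 (16 bytes) with WGS == 1024: 16 KiB
  return 0;
}
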
diff --git a/src/tuning/kernels/xgemm.cpp b/src/tuning/kernels/xgemm.cpp
index 5c242757..85948373 100644
--- a/src/tuning/kernels/xgemm.cpp
+++ b/src/tuning/kernels/xgemm.cpp
@@ -7,162 +7,11 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
-// This file uses the auto-tuner to tune the xgemm OpenCL kernels. There are two variations:
-// - V==1: This tests some limited set of tuning parameters exhaustively.
-// - V==2: This tests a much larger set of tuning parameters by randomly sampling a subset.
+// This file uses the auto-tuner to tune the xgemm OpenCL kernels.
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int V) {
- auto settings = TunerDefaults();
- settings.options = {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction,
- kArgHeuristicSelection, kArgPsoSwarmSize,
- kArgPsoInfGlobal, kArgPsoInfLocal, kArgPsoInfRandom};
- settings.default_m = 1024;
- settings.default_n = 1024;
- settings.default_k = 1024;
- settings.default_fraction = (V==1) ? 1.0 : 512.0; // test all or sample randomly
- settings.default_num_runs = 2;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int V, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = (V==1) ? "xgemm_1" : "xgemm_2";
- settings.kernel_name = "Xgemm";
- settings.sources =
-#include "../src/kernels/level3/xgemm_part1.opencl"
-#include "../src/kernels/level3/xgemm_part2.opencl"
-#include "../src/kernels/level3/xgemm_part3.opencl"
-#include "../src/kernels/level3/xgemm_part4.opencl"
- ;
-
- // Buffer sizes
- settings.size_a = args.m * args.k;
- settings.size_b = args.n * args.k;
- settings.size_c = args.m * args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {2, 3, 4};
- settings.outputs = {4};
-
- // Sets the base thread configuration
- settings.global_size = {args.m, args.n};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1, 1};
- settings.local_size_ref = {8, 8};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"MDIMC", "NDIMC"}};
- settings.mul_global = {{"MDIMC", "NDIMC"}};
- settings.div_global = {{"MWG", "NWG"}};
-
- // Sets the tuning parameters and their possible values
- if (V==1) { // limited subset of tuning parameters - but explorable exhaustively
- settings.parameters = {
- {"MWG", {16, 32, 64}},
- {"NWG", {16, 32, 64}},
- {"KWG", {32}},
- {"MDIMC", {8, 16, 32}},
- {"NDIMC", {8, 16, 32}},
- {"MDIMA", {8, 16, 32}},
- {"NDIMB", {8, 16, 32}},
- {"KWI", {2}},
- {"VWM", {1, 2, 4}},
- {"VWN", {1, 2, 4}},
- {"STRM", {0}},
- {"STRN", {0}},
- {"SA", {0, 1}},
- {"SB", {0, 1}},
- };
- }
- else { // a lot more tuning parameters - has to be sampled randomly, too much to test all
- settings.parameters = {
- {"MWG", {16, 32, 64, 128}},
- {"NWG", {16, 32, 64, 128}},
- {"KWG", {16, 32}},
- {"MDIMC", {8, 16, 32}},
- {"NDIMC", {8, 16, 32}},
- {"MDIMA", {8, 16, 32}},
- {"NDIMB", {8, 16, 32}},
- {"KWI", {2}},
- {"VWM", {1, 2, 4, 8}},
- {"VWN", {1, 2, 4, 8}},
- {"STRM", {0, 1}},
- {"STRN", {0, 1}},
- {"SA", {0, 1}},
- {"SB", {0, 1}},
- };
- }
-
- // Describes how to compute the performance metrics
- settings.metric_amount = 2 * args.m * args.n * args.k;
- settings.performance_unit = "GFLOPS";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int V) {
- auto constraints = std::vector<Constraint>();
- auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
- auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
- auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
- // Requirement for unrolling the KWG loop
- constraints.push_back({MultipleOfX, {"KWG", "KWI"}});
- // Required for integer MWI and NWI
- constraints.push_back({MultipleOfXMulY, {"MWG", "MDIMC", "VWM"}});
- constraints.push_back({MultipleOfXMulY, {"NWG", "NDIMC", "VWN"}});
- // Required for integer MWIA and NWIB
- constraints.push_back({MultipleOfXMulY, {"MWG", "MDIMA", "VWM"}});
- constraints.push_back({MultipleOfXMulY, {"NWG", "NDIMB", "VWN"}});
- // KWG has to be a multiple of KDIMA = ((MDIMC*NDIMC)/(MDIMA)) and KDIMB = (...)
- constraints.push_back({MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "MDIMA"}});
- constraints.push_back({MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "NDIMB"}});
-
- // Extra constraints for variation 1 to limit the set of options significantly
- if (V==1) {
- auto IsEqual = [] (std::vector<size_t> v) { return v[0] == v[1]; };
- constraints.push_back({IsEqual, {"MDIMC", "MDIMA"}});
- constraints.push_back({IsEqual, {"NDIMC", "NDIMB"}});
- constraints.push_back({IsEqual, {"SA", "SB"}});
- }
- return constraints;
-}
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- kernel.SetArgument(0, static_cast<int>(args.m));
- kernel.SetArgument(1, static_cast<int>(args.n));
- kernel.SetArgument(2, static_cast<int>(args.k));
- kernel.SetArgument(3, GetRealArg(args.alpha));
- kernel.SetArgument(4, GetRealArg(args.beta));
- kernel.SetArgument(5, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(6, buffers[3]()); // 3 == B matrix
- kernel.SetArgument(7, buffers[4]()); // 4 == C matrix
- kernel.SetArgument(8, 0);
- kernel.SetArgument(9, 0);
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/xgemm.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -174,11 +23,11 @@ template <int V>
void StartVariation(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::XgemmGetTunerDefaults, clblast::XgemmGetTunerSettings<half>, clblast::XgemmTestValidArguments<half>, clblast::XgemmSetConstraints, clblast::XgemmComputeLocalMemSize<half>, clblast::XgemmSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::XgemmGetTunerDefaults, clblast::XgemmGetTunerSettings<float>, clblast::XgemmTestValidArguments<float>, clblast::XgemmSetConstraints, clblast::XgemmComputeLocalMemSize<float>, clblast::XgemmSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::XgemmGetTunerDefaults, clblast::XgemmGetTunerSettings<double>, clblast::XgemmTestValidArguments<double>, clblast::XgemmSetConstraints, clblast::XgemmComputeLocalMemSize<double>, clblast::XgemmSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, V, clblast::XgemmGetTunerDefaults, clblast::XgemmGetTunerSettings<float2>, clblast::XgemmTestValidArguments<float2>, clblast::XgemmSetConstraints, clblast::XgemmComputeLocalMemSize<float2>, clblast::XgemmSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, V, clblast::XgemmGetTunerDefaults, clblast::XgemmGetTunerSettings<double2>, clblast::XgemmTestValidArguments<double2>, clblast::XgemmSetConstraints, clblast::XgemmComputeLocalMemSize<double2>, clblast::XgemmSetArguments<double2>); break;
}
}
diff --git a/src/tuning/kernels/xgemm.hpp b/src/tuning/kernels/xgemm.hpp
new file mode 100644
index 00000000..5f191ba9
--- /dev/null
+++ b/src/tuning/kernels/xgemm.hpp
@@ -0,0 +1,174 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the xgemm OpenCL kernels. There are two variations:
+// - V==1: This tests some limited set of tuning parameters exhaustively.
+// - V==2: This tests a much larger set of tuning parameters by randomly sampling a subset.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults XgemmGetTunerDefaults(const int V) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction,
+ kArgHeuristicSelection, kArgPsoSwarmSize,
+ kArgPsoInfGlobal, kArgPsoInfLocal, kArgPsoInfRandom};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ settings.default_k = 1024;
+ settings.default_fraction = (V==1) ? 1.0 : 512.0; // test all or sample randomly
+ settings.default_num_runs = 2;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings XgemmGetTunerSettings(const int V, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = (V==1) ? "xgemm_1" : "xgemm_2";
+ settings.kernel_name = "Xgemm";
+ settings.sources =
+#include "../src/kernels/level3/xgemm_part1.opencl"
+#include "../src/kernels/level3/xgemm_part2.opencl"
+#include "../src/kernels/level3/xgemm_part3.opencl"
+#include "../src/kernels/level3/xgemm_part4.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.m * args.k;
+ settings.size_b = args.n * args.k;
+ settings.size_c = args.m * args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {2, 3, 4};
+ settings.outputs = {4};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"MDIMC", "NDIMC"}};
+ settings.mul_global = {{"MDIMC", "NDIMC"}};
+ settings.div_global = {{"MWG", "NWG"}};
+
+ // Sets the tuning parameters and their possible values
+ if (V==1) { // limited subset of tuning parameters - but explorable exhaustively
+ settings.parameters = {
+ {"MWG", {16, 32, 64}},
+ {"NWG", {16, 32, 64}},
+ {"KWG", {32}},
+ {"MDIMC", {8, 16, 32}},
+ {"NDIMC", {8, 16, 32}},
+ {"MDIMA", {8, 16, 32}},
+ {"NDIMB", {8, 16, 32}},
+ {"KWI", {2}},
+ {"VWM", {1, 2, 4}},
+ {"VWN", {1, 2, 4}},
+ {"STRM", {0}},
+ {"STRN", {0}},
+ {"SA", {0, 1}},
+ {"SB", {0, 1}},
+ };
+ }
+ else { // a lot more tuning parameters - has to be sampled randomly, too much to test all
+ settings.parameters = {
+ {"MWG", {16, 32, 64, 128}},
+ {"NWG", {16, 32, 64, 128}},
+ {"KWG", {16, 32}},
+ {"MDIMC", {8, 16, 32}},
+ {"NDIMC", {8, 16, 32}},
+ {"MDIMA", {8, 16, 32}},
+ {"NDIMB", {8, 16, 32}},
+ {"KWI", {2}},
+ {"VWM", {1, 2, 4, 8}},
+ {"VWN", {1, 2, 4, 8}},
+ {"STRM", {0, 1}},
+ {"STRN", {0, 1}},
+ {"SA", {0, 1}},
+ {"SB", {0, 1}},
+ };
+ }
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * args.k;
+ settings.performance_unit = "GFLOPS";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void XgemmTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> XgemmSetConstraints(const int V) {
+ auto constraints = std::vector<Constraint>();
+ auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
+ auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
+ auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
+ // Requirement for unrolling the KWG loop
+ constraints.push_back({MultipleOfX, {"KWG", "KWI"}});
+ // Required for integer MWI and NWI
+ constraints.push_back({MultipleOfXMulY, {"MWG", "MDIMC", "VWM"}});
+ constraints.push_back({MultipleOfXMulY, {"NWG", "NDIMC", "VWN"}});
+ // Required for integer MWIA and NWIB
+ constraints.push_back({MultipleOfXMulY, {"MWG", "MDIMA", "VWM"}});
+ constraints.push_back({MultipleOfXMulY, {"NWG", "NDIMB", "VWN"}});
+ // KWG has to be a multiple of KDIMA = ((MDIMC*NDIMC)/(MDIMA)) and KDIMB = (...)
+ constraints.push_back({MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "MDIMA"}});
+ constraints.push_back({MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "NDIMB"}});
+
+ // Extra constraints for variation 1 to limit the set of options significantly
+ if (V==1) {
+ auto IsEqual = [] (std::vector<size_t> v) { return v[0] == v[1]; };
+ constraints.push_back({IsEqual, {"MDIMC", "MDIMA"}});
+ constraints.push_back({IsEqual, {"NDIMC", "NDIMB"}});
+ constraints.push_back({IsEqual, {"SA", "SB"}});
+ }
+ return constraints;
+}
+template <typename T>
+LocalMemSizeInfo XgemmComputeLocalMemSize(const int) {
+ return {
+ [] (std::vector<size_t> v) -> size_t {
+ return GetBytes(PrecisionValue<T>()) * ((v[0]*v[1]*v[2]) + (v[3]*v[4]*v[5]));
+ },
+ {"SA", "KWG", "MWG", "SB", "KWG", "NWG"}
+ };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void XgemmSetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, static_cast<int>(args.n));
+ kernel.SetArgument(2, static_cast<int>(args.k));
+ kernel.SetArgument(3, GetRealArg(args.alpha));
+ kernel.SetArgument(4, GetRealArg(args.beta));
+ kernel.SetArgument(5, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(6, buffers[3]()); // 3 == B matrix
+ kernel.SetArgument(7, buffers[4]()); // 4 == C matrix
+ kernel.SetArgument(8, 0);
+ kernel.SetArgument(9, 0);
+}
+
+// =================================================================================================
+} // namespace clblast
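
The XgemmComputeLocalMemSize entry defined above feeds the local-memory constraint that SetConfigurations applies (see the tuning.cpp hunk further down): a configuration only survives if sizeof(T) * (SA*KWG*MWG + SB*KWG*NWG) bytes fit in the device's local memory. Below is a minimal standalone sketch of that check, assuming only the arithmetic of the lambda above; the helper name and the explicit device limit are illustrative and not part of this commit:

    #include <cstddef>
    #include <vector>

    // Mirrors the lambda in XgemmComputeLocalMemSize<float>:
    // v = {SA, KWG, MWG, SB, KWG, NWG}, element size = sizeof(float).
    bool XgemmConfigFitsLocalMem(const std::vector<std::size_t>& v,
                                 std::size_t local_mem_bytes) {
      const std::size_t required = sizeof(float) * (v[0]*v[1]*v[2] + v[3]*v[4]*v[5]);
      return required <= local_mem_bytes;
    }
    // Example: SA=SB=1, KWG=32, MWG=NWG=64 needs 4*(2048+2048) = 16 KiB, which fits a
    // typical 32 KiB limit; MWG=NWG=128 with KWG=32 already needs the full 32 KiB.
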
diff --git a/src/tuning/kernels/xgemm_direct.cpp b/src/tuning/kernels/xgemm_direct.cpp
index 0bd2e94d..73c2217c 100644
--- a/src/tuning/kernels/xgemm_direct.cpp
+++ b/src/tuning/kernels/xgemm_direct.cpp
@@ -7,159 +7,11 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
-// This file uses the auto-tuner to tune the direct xgemm kernels. There are two variations:
-// - V==1: This tests some limited set of tuning parameters exhaustively.
-// - V==2: This tests a much larger set of tuning parameters by randomly sampling a subset.
+// This file uses the auto-tuner to tune the direct xgemm kernels.
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int V) {
- auto settings = TunerDefaults();
- settings.options = {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction,
- kArgHeuristicSelection, kArgPsoSwarmSize,
- kArgPsoInfGlobal, kArgPsoInfLocal, kArgPsoInfRandom};
- settings.default_m = 256;
- settings.default_n = 256;
- settings.default_k = 256;
- settings.default_fraction = (V==1) ? 1.0 : 64.0; // test all or sample randomly
- settings.default_num_runs = 4;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int V, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = (V==1) ? "xgemm_direct_1" : "xgemm_direct_2";
- settings.kernel_name = "XgemmDirectTN";
- settings.sources =
-#include "../src/kernels/level3/xgemm_direct_part1.opencl"
-#include "../src/kernels/level3/xgemm_direct_part2.opencl"
-#include "../src/kernels/level3/xgemm_direct_part3.opencl"
- ;
-
- // Buffer sizes
- settings.size_a = args.m * args.k;
- settings.size_b = args.n * args.k;
- settings.size_c = args.m * args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {2, 3, 4};
- settings.outputs = {4};
-
- // Sets the base thread configuration
- settings.global_size = {args.m, args.n};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1, 1};
- settings.local_size_ref = {8, 8};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"MDIMCD", "NDIMCD"}};
- settings.mul_global = {{"MDIMCD", "NDIMCD"}};
- settings.div_global = {{"WGD", "WGD"}};
-
- // Sets the tuning parameters and their possible values
- if (V==1) { // limited subset of tuning parameters - but explorable exhaustively
- settings.parameters = {
- {"WGD", {8, 16, 32}},
- {"MDIMCD", {8, 16, 32}},
- {"NDIMCD", {8, 16, 32}},
- {"MDIMAD", {8, 16, 32}},
- {"NDIMBD", {8, 16, 32}},
- {"KWID", {2}},
- {"VWMD", {1, 2, 4, 8}},
- {"VWND", {1, 2, 4, 8}},
- {"PADA", {1}},
- {"PADB", {1}},
- };
- }
- else { // a lot more tuning parameters - has to be sampled randomly, too much to test all
- settings.parameters = {
- {"WGD", {8, 16, 32, 64}},
- {"MDIMCD", {8, 16, 32}},
- {"NDIMCD", {8, 16, 32}},
- {"MDIMAD", {8, 16, 32}},
- {"NDIMBD", {8, 16, 32}},
- {"KWID", {2, 8, 16}},
- {"VWMD", {1, 2, 4, 8}},
- {"VWND", {1, 2, 4, 8}},
- {"PADA", {0, 1}},
- {"PADB", {0, 1}},
- };
- }
-
- // Describes how to compute the performance metrics
- settings.metric_amount = 2 * args.m * args.n * args.k;
- settings.performance_unit = "GFLOPS";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int V) {
- auto constraints = std::vector<Constraint>();
- auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
- auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
- auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
- // Requirement for unrolling the WGD loop
- constraints.push_back({MultipleOfX, {"WGD", "KWID"}});
- // Required for integer MWID and NWID
- constraints.push_back({MultipleOfXMulY, {"WGD", "MDIMCD", "VWMD"}});
- constraints.push_back({MultipleOfXMulY, {"WGD", "NDIMCD", "VWND"}});
- // Required for integer MWIAD and NWIBD
- constraints.push_back({MultipleOfXMulY, {"WGD", "MDIMAD", "VWMD"}});
- constraints.push_back({MultipleOfXMulY, {"WGD", "NDIMBD", "VWND"}});
- // WGD has to be a multiple of KDIMAD = ((MDIMCD*NDIMCD)/(MDIMAD)) and KDIMBD = (...)
- constraints.push_back({MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "MDIMAD"}});
- constraints.push_back({MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "NDIMBD"}});
-
- // Extra constraints for variation 1 to limit the set of options significantly
- if (V==1) {
- auto IsEqual = [] (std::vector<size_t> v) { return v[0] == v[1]; };
- constraints.push_back({IsEqual, {"MDIMCD", "MDIMAD"}});
- constraints.push_back({IsEqual, {"NDIMCD", "NDIMBD"}});
- }
- return constraints;
-}
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- kernel.SetArgument(0, static_cast<int>(args.m));
- kernel.SetArgument(1, static_cast<int>(args.n));
- kernel.SetArgument(2, static_cast<int>(args.k));
- kernel.SetArgument(3, GetRealArg(args.alpha));
- kernel.SetArgument(4, GetRealArg(args.beta));
- kernel.SetArgument(5, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(6, 0); // a_offset
- kernel.SetArgument(7, static_cast<int>(args.k)); // a_ld
- kernel.SetArgument(8, buffers[3]()); // 3 == B matrix
- kernel.SetArgument(9, 0); // b_offset
- kernel.SetArgument(10, static_cast<int>(args.n)); // b_ld
- kernel.SetArgument(11, buffers[4]()); // 4 == C matrix
- kernel.SetArgument(12, 0); // c_offset
- kernel.SetArgument(13, static_cast<int>(args.n)); // c_ld
- kernel.SetArgument(14, 1); // c_do_transpose
- kernel.SetArgument(15, 0); // a_conjugate
- kernel.SetArgument(16, 0); // b_conjugate
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/xgemm_direct.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -171,11 +23,11 @@ template <int V>
void StartVariation(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::XgemmDirectGetTunerDefaults, clblast::XgemmDirectGetTunerSettings<half>, clblast::XgemmDirectTestValidArguments<half>, clblast::XgemmDirectSetConstraints, clblast::XgemmDirectComputeLocalMemSize<half>, clblast::XgemmDirectSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::XgemmDirectGetTunerDefaults, clblast::XgemmDirectGetTunerSettings<float>, clblast::XgemmDirectTestValidArguments<float>, clblast::XgemmDirectSetConstraints, clblast::XgemmDirectComputeLocalMemSize<float>, clblast::XgemmDirectSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::XgemmDirectGetTunerDefaults, clblast::XgemmDirectGetTunerSettings<double>, clblast::XgemmDirectTestValidArguments<double>, clblast::XgemmDirectSetConstraints, clblast::XgemmDirectComputeLocalMemSize<double>, clblast::XgemmDirectSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, V, clblast::XgemmDirectGetTunerDefaults, clblast::XgemmDirectGetTunerSettings<float2>, clblast::XgemmDirectTestValidArguments<float2>, clblast::XgemmDirectSetConstraints, clblast::XgemmDirectComputeLocalMemSize<float2>, clblast::XgemmDirectSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, V, clblast::XgemmDirectGetTunerDefaults, clblast::XgemmDirectGetTunerSettings<double2>, clblast::XgemmDirectTestValidArguments<double2>, clblast::XgemmDirectSetConstraints, clblast::XgemmDirectComputeLocalMemSize<double2>, clblast::XgemmDirectSetArguments<double2>); break;
}
}
diff --git a/src/tuning/kernels/xgemm_direct.hpp b/src/tuning/kernels/xgemm_direct.hpp
new file mode 100644
index 00000000..baa063c0
--- /dev/null
+++ b/src/tuning/kernels/xgemm_direct.hpp
@@ -0,0 +1,171 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the direct xgemm kernels. There are two variations:
+// - V==1: This tests some limited set of tuning parameters exhaustively.
+// - V==2: This tests a much larger set of tuning parameters by randomly sampling a subset.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults XgemmDirectGetTunerDefaults(const int V) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgK, kArgAlpha, kArgBeta, kArgFraction,
+ kArgHeuristicSelection, kArgPsoSwarmSize,
+ kArgPsoInfGlobal, kArgPsoInfLocal, kArgPsoInfRandom};
+ settings.default_m = 256;
+ settings.default_n = 256;
+ settings.default_k = 256;
+ settings.default_fraction = (V==1) ? 1.0 : 64.0; // test all or sample randomly
+ settings.default_num_runs = 4;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings XgemmDirectGetTunerSettings(const int V, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = (V==1) ? "xgemm_direct_1" : "xgemm_direct_2";
+ settings.kernel_name = "XgemmDirectTN";
+ settings.sources =
+#include "../src/kernels/level3/xgemm_direct_part1.opencl"
+#include "../src/kernels/level3/xgemm_direct_part2.opencl"
+#include "../src/kernels/level3/xgemm_direct_part3.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_a = args.m * args.k;
+ settings.size_b = args.n * args.k;
+ settings.size_c = args.m * args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {2, 3, 4};
+ settings.outputs = {4};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"MDIMCD", "NDIMCD"}};
+ settings.mul_global = {{"MDIMCD", "NDIMCD"}};
+ settings.div_global = {{"WGD", "WGD"}};
+
+ // Sets the tuning parameters and their possible values
+ if (V==1) { // limited subset of tuning parameters - but explorable exhaustively
+ settings.parameters = {
+ {"WGD", {8, 16, 32}},
+ {"MDIMCD", {8, 16, 32}},
+ {"NDIMCD", {8, 16, 32}},
+ {"MDIMAD", {8, 16, 32}},
+ {"NDIMBD", {8, 16, 32}},
+ {"KWID", {2}},
+ {"VWMD", {1, 2, 4, 8}},
+ {"VWND", {1, 2, 4, 8}},
+ {"PADA", {1}},
+ {"PADB", {1}},
+ };
+ }
+ else { // a lot more tuning parameters - has to be sampled randomly, too much to test all
+ settings.parameters = {
+ {"WGD", {8, 16, 32, 64}},
+ {"MDIMCD", {8, 16, 32}},
+ {"NDIMCD", {8, 16, 32}},
+ {"MDIMAD", {8, 16, 32}},
+ {"NDIMBD", {8, 16, 32}},
+ {"KWID", {2, 8, 16}},
+ {"VWMD", {1, 2, 4, 8}},
+ {"VWND", {1, 2, 4, 8}},
+ {"PADA", {0, 1}},
+ {"PADB", {0, 1}},
+ };
+ }
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = 2 * args.m * args.n * args.k;
+ settings.performance_unit = "GFLOPS";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void XgemmDirectTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> XgemmDirectSetConstraints(const int V) {
+ auto constraints = std::vector<Constraint>();
+ auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
+ auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
+ auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
+ // Requirement for unrolling the WGD loop
+ constraints.push_back({MultipleOfX, {"WGD", "KWID"}});
+ // Required for integer MWID and NWID
+ constraints.push_back({MultipleOfXMulY, {"WGD", "MDIMCD", "VWMD"}});
+ constraints.push_back({MultipleOfXMulY, {"WGD", "NDIMCD", "VWND"}});
+ // Required for integer MWIAD and NWIBD
+ constraints.push_back({MultipleOfXMulY, {"WGD", "MDIMAD", "VWMD"}});
+ constraints.push_back({MultipleOfXMulY, {"WGD", "NDIMBD", "VWND"}});
+ // WGD has to be a multiple of KDIMAD = ((MDIMCD*NDIMCD)/(MDIMAD)) and KDIMBD = (...)
+ constraints.push_back({MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "MDIMAD"}});
+ constraints.push_back({MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "NDIMBD"}});
+
+ // Extra constraints for variation 1 to limit the set of options significantly
+ if (V==1) {
+ auto IsEqual = [] (std::vector<size_t> v) { return v[0] == v[1]; };
+ constraints.push_back({IsEqual, {"MDIMCD", "MDIMAD"}});
+ constraints.push_back({IsEqual, {"NDIMCD", "NDIMBD"}});
+ }
+ return constraints;
+}
+template <typename T>
+LocalMemSizeInfo XgemmDirectComputeLocalMemSize(const int) {
+ return {
+ [] (std::vector<size_t> v) -> size_t {
+ return GetBytes(PrecisionValue<T>()) * ((v[0]*(v[0] + v[1]) + v[0]*(v[0] + v[2])));
+ },
+ {"WGD", "PADA", "PADB"}
+ };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void XgemmDirectSetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, static_cast<int>(args.n));
+ kernel.SetArgument(2, static_cast<int>(args.k));
+ kernel.SetArgument(3, GetRealArg(args.alpha));
+ kernel.SetArgument(4, GetRealArg(args.beta));
+ kernel.SetArgument(5, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(6, 0); // a_offset
+ kernel.SetArgument(7, static_cast<int>(args.k)); // a_ld
+ kernel.SetArgument(8, buffers[3]()); // 3 == B matrix
+ kernel.SetArgument(9, 0); // b_offset
+ kernel.SetArgument(10, static_cast<int>(args.n)); // b_ld
+ kernel.SetArgument(11, buffers[4]()); // 4 == C matrix
+ kernel.SetArgument(12, 0); // c_offset
+ kernel.SetArgument(13, static_cast<int>(args.n)); // c_ld
+ kernel.SetArgument(14, 1); // c_do_transpose
+ kernel.SetArgument(15, 0); // a_conjugate
+ kernel.SetArgument(16, 0); // b_conjugate
+}
+
+// =================================================================================================
+} // namespace clblast
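
The constraints above are plain predicates over the listed parameter names; a candidate configuration is kept only if every predicate holds. A small standalone sketch of how the KDIMAD requirement filters configurations, assuming IsMultiple(a, b) behaves as a % b == 0 and using illustrative values only:

    #include <cstddef>
    #include <vector>

    // Sketch of the MultipleOfXMulYDivZ predicate used above with
    // {"WGD", "MDIMCD", "NDIMCD", "MDIMAD"}: WGD must be a multiple of
    // KDIMAD = (MDIMCD * NDIMCD) / MDIMAD.
    bool WgdDivisibleByKdimad(const std::vector<std::size_t>& v) {
      return v[0] % ((v[1] * v[2]) / v[3]) == 0;  // assumes IsMultiple(a, b) == (a % b == 0)
    }
    // WGD=32, MDIMCD=NDIMCD=16, MDIMAD=8  -> KDIMAD=32, 32 % 32 == 0: kept.
    // WGD=16, MDIMCD=NDIMCD=16, MDIMAD=8  -> KDIMAD=32, 16 % 32 != 0: rejected.
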
diff --git a/src/tuning/kernels/xgemv.cpp b/src/tuning/kernels/xgemv.cpp
index 965fc53d..6505a081 100644
--- a/src/tuning/kernels/xgemv.cpp
+++ b/src/tuning/kernels/xgemv.cpp
@@ -7,135 +7,11 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
-// This file uses the auto-tuner to tune the xgemv OpenCL kernels. Three variants are tuned:
-// 1: The full version of the kernel
-// 2: The fast version for non-transposed matrices
-// 3: The fast version for transposed matrices
+// This file uses the auto-tuner to tune the xgemv OpenCL kernels.
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgM, kArgN, kArgAlpha, kArgBeta};
- settings.default_m = 2048;
- settings.default_n = 2048;
- settings.default_num_runs = 4;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int V, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = (V==1) ? "xgemv" : ((V==2) ? "xgemv_fast" : "xgemv_fast_rot");
- settings.kernel_name = (V==1) ? "Xgemv" : ((V==2) ? "XgemvFast" : "XgemvFastRot");
- settings.sources =
-#include "../src/kernels/level2/xgemv.opencl"
-#include "../src/kernels/level2/xgemv_fast.opencl"
- ;
-
- // Buffer sizes
- settings.size_x = args.n;
- settings.size_y = args.m;
- settings.size_a = args.m * args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {0, 1, 2};
- settings.outputs = {1};
-
- // Sets the base thread configuration
- settings.global_size = {args.m};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1};
- settings.local_size_ref = {64};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"WGS"+std::to_string(V)}};
- settings.div_global = (V==1 || V==2) ? TransformVector{{"WPT"+std::to_string(V)}} : TransformVector{};
-
- // Sets the tuning parameters and their possible values
- if (V==1) {
- settings.parameters = {
- {"WGS"+std::to_string(V), {32, 64, 128, 256}},
- {"WPT"+std::to_string(V), {1, 2, 4}},
- };
- }
- if (V==2) {
- settings.parameters = {
- {"WGS"+std::to_string(V), {16, 32, 64, 128, 256}},
- {"WPT"+std::to_string(V), {1, 2, 4}},
- {"VW"+std::to_string(V), {1, 2, 4, 8}},
- };
- }
- if (V==3) {
- settings.parameters = {
- {"WGS"+std::to_string(V), {16, 32, 64, 128}},
- {"WPT"+std::to_string(V), {1, 2, 4, 8, 16, 32}},
- {"VW"+std::to_string(V), {1, 2, 4, 8}},
- };
- }
-
- // Describes how to compute the performance metrics
- settings.metric_amount = (args.m*args.n + 2*args.m + args.n) * GetBytes(args.precision);
- settings.performance_unit = "GB/s";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int V) {
- auto constraints = std::vector<Constraint>();
- if (V==2 || V==3) {
- auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
- constraints.push_back({MultipleOfX, {"WPT"+std::to_string(V), "VW"+std::to_string(V)}});
- }
- if (V==3) {
- auto LargerOrEqual = [] (std::vector<size_t> v) { return v[0] >= v[1]; };
- constraints.push_back({LargerOrEqual, {"WGS"+std::to_string(V), "WPT"+std::to_string(V)}});
- }
- return constraints;
-}
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int V, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- auto a_rotated = (V==3) ? 1 : 0;
- kernel.SetArgument(0, static_cast<int>(args.m));
- kernel.SetArgument(1, static_cast<int>(args.n));
- kernel.SetArgument(2, GetRealArg(args.alpha));
- kernel.SetArgument(3, GetRealArg(args.beta));
- kernel.SetArgument(4, a_rotated);
- kernel.SetArgument(5, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(6, 0);
- kernel.SetArgument(7, static_cast<int>(args.m));
- kernel.SetArgument(8, buffers[0]()); // 0 == X vector
- kernel.SetArgument(9, 0);
- kernel.SetArgument(10, 1);
- kernel.SetArgument(11, buffers[1]()); // 1 == Y vector
- kernel.SetArgument(12, 0);
- kernel.SetArgument(13, 1);
- kernel.SetArgument(14, 0); // Conjugate transpose
- kernel.SetArgument(15, 0); // Additional parameter
- kernel.SetArgument(16, 0); // Banded 'kl'
- kernel.SetArgument(17, 0); // Banded 'ku'
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/xgemv.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -147,11 +23,11 @@ template <int V>
void StartVariation(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, V, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, V, clblast::XgemvGetTunerDefaults, clblast::XgemvGetTunerSettings<half>, clblast::XgemvTestValidArguments<half>, clblast::XgemvSetConstraints, clblast::XgemvComputeLocalMemSize<half>, clblast::XgemvSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, V, clblast::XgemvGetTunerDefaults, clblast::XgemvGetTunerSettings<float>, clblast::XgemvTestValidArguments<float>, clblast::XgemvSetConstraints, clblast::XgemvComputeLocalMemSize<float>, clblast::XgemvSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, V, clblast::XgemvGetTunerDefaults, clblast::XgemvGetTunerSettings<double>, clblast::XgemvTestValidArguments<double>, clblast::XgemvSetConstraints, clblast::XgemvComputeLocalMemSize<double>, clblast::XgemvSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, V, clblast::XgemvGetTunerDefaults, clblast::XgemvGetTunerSettings<float2>, clblast::XgemvTestValidArguments<float2>, clblast::XgemvSetConstraints, clblast::XgemvComputeLocalMemSize<float2>, clblast::XgemvSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, V, clblast::XgemvGetTunerDefaults, clblast::XgemvGetTunerSettings<double2>, clblast::XgemvTestValidArguments<double2>, clblast::XgemvSetConstraints, clblast::XgemvComputeLocalMemSize<double2>, clblast::XgemvSetArguments<double2>); break;
}
}
diff --git a/src/tuning/kernels/xgemv.hpp b/src/tuning/kernels/xgemv.hpp
new file mode 100644
index 00000000..c582816e
--- /dev/null
+++ b/src/tuning/kernels/xgemv.hpp
@@ -0,0 +1,155 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the xgemv OpenCL kernels. Three variants are tuned:
+// 1: The full version of the kernel
+// 2: The fast version for non-transposed matrices
+// 3: The fast version for transposed matrices
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults XgemvGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha, kArgBeta};
+ settings.default_m = 2048;
+ settings.default_n = 2048;
+ settings.default_num_runs = 4;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings XgemvGetTunerSettings(const int V, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = (V==1) ? "xgemv" : ((V==2) ? "xgemv_fast" : "xgemv_fast_rot");
+ settings.kernel_name = (V==1) ? "Xgemv" : ((V==2) ? "XgemvFast" : "XgemvFastRot");
+ settings.sources =
+#include "../src/kernels/level2/xgemv.opencl"
+#include "../src/kernels/level2/xgemv_fast.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_x = args.n;
+ settings.size_y = args.m;
+ settings.size_a = args.m * args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {0, 1, 2};
+ settings.outputs = {1};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1};
+ settings.local_size_ref = {64};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"WGS"+std::to_string(V)}};
+ settings.div_global = (V==1 || V==2) ? TransformVector{{"WPT"+std::to_string(V)}} : TransformVector{};
+
+ // Sets the tuning parameters and their possible values
+ if (V==1) {
+ settings.parameters = {
+ {"WGS"+std::to_string(V), {32, 64, 128, 256}},
+ {"WPT"+std::to_string(V), {1, 2, 4}},
+ };
+ }
+ if (V==2) {
+ settings.parameters = {
+ {"WGS"+std::to_string(V), {16, 32, 64, 128, 256}},
+ {"WPT"+std::to_string(V), {1, 2, 4}},
+ {"VW"+std::to_string(V), {1, 2, 4, 8}},
+ };
+ }
+ if (V==3) {
+ settings.parameters = {
+ {"WGS"+std::to_string(V), {16, 32, 64, 128}},
+ {"WPT"+std::to_string(V), {1, 2, 4, 8, 16, 32}},
+ {"VW"+std::to_string(V), {1, 2, 4, 8}},
+ };
+ }
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = (args.m*args.n + 2*args.m + args.n) * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void XgemvTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> XgemvSetConstraints(const int V) {
+ auto constraints = std::vector<Constraint>();
+ if (V==2 || V==3) {
+ auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
+ constraints.push_back({MultipleOfX, {"WPT"+std::to_string(V), "VW"+std::to_string(V)}});
+ }
+ if (V==3) {
+ auto LargerOrEqual = [] (std::vector<size_t> v) { return v[0] >= v[1]; };
+ constraints.push_back({LargerOrEqual, {"WGS"+std::to_string(V), "WPT"+std::to_string(V)}});
+ }
+ return constraints;
+}
+template <typename T>
+LocalMemSizeInfo XgemvComputeLocalMemSize(const int V) {
+ if (V == 1 || V == 2) {
+ return {
+ [V] (std::vector<size_t> v) -> size_t {
+ return GetBytes(PrecisionValue<T>()) * v[0];
+ },
+ {"WGS" + std::to_string(V)}
+ };
+ }
+ return {
+ [V] (std::vector<size_t> v) -> size_t {
+ return GetBytes(PrecisionValue<T>()) * (v[0] + v[1] * v[2]);
+ },
+ {"WGS3", "WPT3", "WGS3"}
+ };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void XgemvSetArguments(const int V, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ auto a_rotated = (V==3) ? 1 : 0;
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, static_cast<int>(args.n));
+ kernel.SetArgument(2, GetRealArg(args.alpha));
+ kernel.SetArgument(3, GetRealArg(args.beta));
+ kernel.SetArgument(4, a_rotated);
+ kernel.SetArgument(5, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(6, 0);
+ kernel.SetArgument(7, static_cast<int>(args.m));
+ kernel.SetArgument(8, buffers[0]()); // 0 == X vector
+ kernel.SetArgument(9, 0);
+ kernel.SetArgument(10, 1);
+ kernel.SetArgument(11, buffers[1]()); // 1 == Y vector
+ kernel.SetArgument(12, 0);
+ kernel.SetArgument(13, 1);
+ kernel.SetArgument(14, 0); // Conjugate transpose
+ kernel.SetArgument(15, 0); // Additional parameter
+ kernel.SetArgument(16, 0); // Banded 'kl'
+ kernel.SetArgument(17, 0); // Banded 'ku'
+}
+
+// =================================================================================================
+} // namespace clblast
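
For the XgemvFastRot variant (V==3) the local-memory estimate above is WGS3 + WPT3*WGS3 elements of T; for V==1 and V==2 it is just WGS elements. A quick worked instance, with illustrative parameter values only:

    #include <cstddef>

    // Local memory needed by XgemvFastRot for one example configuration, following the
    // lambda in XgemvComputeLocalMemSize (v = {WGS3, WPT3, WGS3}).
    constexpr std::size_t wgs3 = 64, wpt3 = 8, bytes_fp32 = 4;
    constexpr std::size_t local_bytes = bytes_fp32 * (wgs3 + wpt3 * wgs3);  // 4 * (64 + 512) = 2304
    static_assert(local_bytes == 2304, "comfortably within a typical 32 KiB local memory");
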
diff --git a/src/tuning/kernels/xger.cpp b/src/tuning/kernels/xger.cpp
index a88fb5d6..e4c9fc03 100644
--- a/src/tuning/kernels/xger.cpp
+++ b/src/tuning/kernels/xger.cpp
@@ -11,95 +11,7 @@
//
// =================================================================================================
-#include <string>
-#include <vector>
-
-#include "utilities/utilities.hpp"
-#include "tuning/tuning.hpp"
-
-namespace clblast {
-// =================================================================================================
-
-// Settings for this kernel (default command-line arguments)
-TunerDefaults GetTunerDefaults(const int) {
- auto settings = TunerDefaults();
- settings.options = {kArgM, kArgN, kArgAlpha};
- settings.default_m = 1024;
- settings.default_n = 1024;
- return settings;
-}
-
-// Settings for this kernel (general)
-template <typename T>
-TunerSettings GetTunerSettings(const int, const Arguments<T> &args) {
- auto settings = TunerSettings();
-
- // Identification of the kernel
- settings.kernel_family = "xger";
- settings.kernel_name = "Xger";
- settings.sources =
-#include "../src/kernels/level2/level2.opencl"
-#include "../src/kernels/level2/xger.opencl"
- ;
-
- // Buffer sizes
- settings.size_x = args.m;
- settings.size_y = args.n;
- settings.size_a = args.m * args.n;
-
- // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
- settings.inputs = {0, 1, 2};
- settings.outputs = {2};
-
- // Sets the base thread configuration
- settings.global_size = {args.m, args.n};
- settings.global_size_ref = settings.global_size;
- settings.local_size = {1, 1};
- settings.local_size_ref = {8, 8};
-
- // Transforms the thread configuration based on the parameters
- settings.mul_local = {{"WGS1", "WGS2"}};
- settings.div_global = {{"WPT", "WPT"}};
-
- // Sets the tuning parameters and their possible values
- settings.parameters = {
- {"WGS1", {4, 8, 16, 32, 64, 128, 256, 512}},
- {"WGS2", {1, 2, 4, 8, 16, 32, 64, 128, 256}},
- {"WPT", {1, 2, 4}},
- };
-
- // Describes how to compute the performance metrics
- settings.metric_amount = (2*args.m*args.n + args.m + args.n) * GetBytes(args.precision);
- settings.performance_unit = "GB/s";
-
- return settings;
-}
-
-// Tests for valid arguments
-template <typename T>
-void TestValidArguments(const int, const Arguments<T> &) { }
-std::vector<Constraint> SetConstraints(const int) { return {}; }
-
-// Sets the kernel's arguments
-template <typename T>
-void SetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
- kernel.SetArgument(0, static_cast<int>(args.m));
- kernel.SetArgument(1, static_cast<int>(args.n));
- kernel.SetArgument(2, GetRealArg(args.alpha));
- kernel.SetArgument(3, buffers[0]()); // 0 == X vector
- kernel.SetArgument(4, 0); // x_offset
- kernel.SetArgument(5, 1); // x_increment
- kernel.SetArgument(6, buffers[1]()); // 1 == Y vector
- kernel.SetArgument(7, 0); // y_offset
- kernel.SetArgument(8, 1); // y_increment
- kernel.SetArgument(9, buffers[2]()); // 2 == A matrix
- kernel.SetArgument(10, 0); // a_offset
- kernel.SetArgument(11, static_cast<int>(args.m)); // a_ld
- kernel.SetArgument(12, 0); // a_is_rowmajor
-}
-
-// =================================================================================================
-} // namespace clblast
+#include "tuning/kernels/xger.hpp"
// Shortcuts to the clblast namespace
using half = clblast::half;
@@ -110,11 +22,11 @@ using double2 = clblast::double2;
int main(int argc, char *argv[]) {
const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
switch(clblast::GetPrecision(command_line_args)) {
- case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<half>, clblast::TestValidArguments<half>, clblast::SetConstraints, clblast::SetArguments<half>); break;
- case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float>, clblast::TestValidArguments<float>, clblast::SetConstraints, clblast::SetArguments<float>); break;
- case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double>, clblast::TestValidArguments<double>, clblast::SetConstraints, clblast::SetArguments<double>); break;
- case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<float2>, clblast::TestValidArguments<float2>, clblast::SetConstraints, clblast::SetArguments<float2>); break;
- case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::GetTunerDefaults, clblast::GetTunerSettings<double2>, clblast::TestValidArguments<double2>, clblast::SetConstraints, clblast::SetArguments<double2>); break;
+ case clblast::Precision::kHalf: clblast::Tuner<half>(argc, argv, 0, clblast::XgerGetTunerDefaults, clblast::XgerGetTunerSettings<half>, clblast::XgerTestValidArguments<half>, clblast::XgerSetConstraints, clblast::XgerComputeLocalMemSize<half>, clblast::XgerSetArguments<half>); break;
+ case clblast::Precision::kSingle: clblast::Tuner<float>(argc, argv, 0, clblast::XgerGetTunerDefaults, clblast::XgerGetTunerSettings<float>, clblast::XgerTestValidArguments<float>, clblast::XgerSetConstraints, clblast::XgerComputeLocalMemSize<float>, clblast::XgerSetArguments<float>); break;
+ case clblast::Precision::kDouble: clblast::Tuner<double>(argc, argv, 0, clblast::XgerGetTunerDefaults, clblast::XgerGetTunerSettings<double>, clblast::XgerTestValidArguments<double>, clblast::XgerSetConstraints, clblast::XgerComputeLocalMemSize<double>, clblast::XgerSetArguments<double>); break;
+ case clblast::Precision::kComplexSingle: clblast::Tuner<float2>(argc, argv, 0, clblast::XgerGetTunerDefaults, clblast::XgerGetTunerSettings<float2>, clblast::XgerTestValidArguments<float2>, clblast::XgerSetConstraints, clblast::XgerComputeLocalMemSize<float2>, clblast::XgerSetArguments<float2>); break;
+ case clblast::Precision::kComplexDouble: clblast::Tuner<double2>(argc, argv, 0, clblast::XgerGetTunerDefaults, clblast::XgerGetTunerSettings<double2>, clblast::XgerTestValidArguments<double2>, clblast::XgerSetConstraints, clblast::XgerComputeLocalMemSize<double2>, clblast::XgerSetArguments<double2>); break;
}
return 0;
}
diff --git a/src/tuning/kernels/xger.hpp b/src/tuning/kernels/xger.hpp
new file mode 100644
index 00000000..0473572d
--- /dev/null
+++ b/src/tuning/kernels/xger.hpp
@@ -0,0 +1,106 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file uses the auto-tuner to tune the xger OpenCL kernels.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "utilities/utilities.hpp"
+#include "tuning/tuning.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// Settings for this kernel (default command-line arguments)
+TunerDefaults XgerGetTunerDefaults(const int) {
+ auto settings = TunerDefaults();
+ settings.options = {kArgM, kArgN, kArgAlpha};
+ settings.default_m = 1024;
+ settings.default_n = 1024;
+ return settings;
+}
+
+// Settings for this kernel (general)
+template <typename T>
+TunerSettings XgerGetTunerSettings(const int, const Arguments<T> &args) {
+ auto settings = TunerSettings();
+
+ // Identification of the kernel
+ settings.kernel_family = "xger";
+ settings.kernel_name = "Xger";
+ settings.sources =
+#include "../src/kernels/level2/level2.opencl"
+#include "../src/kernels/level2/xger.opencl"
+ ;
+
+ // Buffer sizes
+ settings.size_x = args.m;
+ settings.size_y = args.n;
+ settings.size_a = args.m * args.n;
+
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {0, 1, 2};
+ settings.outputs = {2};
+
+ // Sets the base thread configuration
+ settings.global_size = {args.m, args.n};
+ settings.global_size_ref = settings.global_size;
+ settings.local_size = {1, 1};
+ settings.local_size_ref = {8, 8};
+
+ // Transforms the thread configuration based on the parameters
+ settings.mul_local = {{"WGS1", "WGS2"}};
+ settings.div_global = {{"WPT", "WPT"}};
+
+ // Sets the tuning parameters and their possible values
+ settings.parameters = {
+ {"WGS1", {4, 8, 16, 32, 64, 128, 256, 512}},
+ {"WGS2", {1, 2, 4, 8, 16, 32, 64, 128, 256}},
+ {"WPT", {1, 2, 4}},
+ };
+
+ // Describes how to compute the performance metrics
+ settings.metric_amount = (2*args.m*args.n + args.m + args.n) * GetBytes(args.precision);
+ settings.performance_unit = "GB/s";
+
+ return settings;
+}
+
+// Tests for valid arguments
+template <typename T>
+void XgerTestValidArguments(const int, const Arguments<T> &) { }
+std::vector<Constraint> XgerSetConstraints(const int) { return {}; }
+template <typename T>
+LocalMemSizeInfo XgerComputeLocalMemSize(const int) {
+ return { [] (std::vector<size_t>) -> size_t { return 0; }, {} };
+}
+
+// Sets the kernel's arguments
+template <typename T>
+void XgerSetArguments(const int, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, static_cast<int>(args.n));
+ kernel.SetArgument(2, GetRealArg(args.alpha));
+ kernel.SetArgument(3, buffers[0]()); // 0 == X vector
+ kernel.SetArgument(4, 0); // x_offset
+ kernel.SetArgument(5, 1); // x_increment
+ kernel.SetArgument(6, buffers[1]()); // 1 == Y vector
+ kernel.SetArgument(7, 0); // y_offset
+ kernel.SetArgument(8, 1); // y_increment
+ kernel.SetArgument(9, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(10, 0); // a_offset
+ kernel.SetArgument(11, static_cast<int>(args.m)); // a_ld
+ kernel.SetArgument(12, 0); // a_is_rowmajor
+}
+
+// =================================================================================================
+} // namespace clblast
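
Xger (like Xgemv) is bandwidth-bound, so its metric_amount counts bytes moved rather than flops: (2*m*n + m + n) elements of the chosen precision, covering the read and write of A plus the reads of x and y. The tuner relates this amount to the measured kernel time to report GB/s; the timing itself lives in tuning.cpp and is not shown here. A worked instance for the default sizes:

    #include <cstddef>

    // Bytes moved by one Xger call at the default m = n = 1024 in single precision.
    constexpr std::size_t m = 1024, n = 1024, bytes_fp32 = 4;
    constexpr std::size_t moved = (2*m*n + m + n) * bytes_fp32;  // 8,396,800 bytes, about 8 MiB
    // At, say, 0.1 ms per run this corresponds to roughly 84 GB/s.
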
diff --git a/src/tuning/tuning.cpp b/src/tuning/tuning.cpp
index b5e01f65..dd4a83e6 100644
--- a/src/tuning/tuning.cpp
+++ b/src/tuning/tuning.cpp
@@ -93,6 +93,7 @@ void Tuner(int argc, char* argv[], const int V,
GetTunerSettingsFunc<T> GetTunerSettings,
TestValidArgumentsFunc<T> TestValidArguments,
SetConstraintsFunc SetConstraints,
+ ComputeLocalMemSizeFunc<T> ComputeLocalMemSize,
SetArgumentsFunc<T> SetArguments) {
constexpr auto kSeed = 42; // fixed seed for reproducibility
@@ -171,7 +172,8 @@ void Tuner(int argc, char* argv[], const int V,
}
// Sets the tunable parameters and their possible values
- auto configurations = SetConfigurations(settings.parameters, SetConstraints(V));
+ auto configurations = SetConfigurations(device, settings.parameters,
+ SetConstraints(V), ComputeLocalMemSize(V));
printf("* Found %s%zu configuration(s)%s\n",
kPrintMessage.c_str(), configurations.size(), kPrintEnd.c_str());
@@ -380,11 +382,11 @@ void Tuner(int argc, char* argv[], const int V,
}
// Compiles the above function
-template void Tuner<half>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<half> GetTunerSettings, TestValidArgumentsFunc<half> TestValidArguments, SetConstraintsFunc SetConstraints, SetArgumentsFunc<half> SetArguments);
-template void Tuner<float>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<float> GetTunerSettings, TestValidArgumentsFunc<float> TestValidArguments, SetConstraintsFunc SetConstraints, SetArgumentsFunc<float> SetArguments);
-template void Tuner<double>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<double> GetTunerSettings, TestValidArgumentsFunc<double> TestValidArguments, SetConstraintsFunc SetConstraints, SetArgumentsFunc<double> SetArguments);
-template void Tuner<float2>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<float2> GetTunerSettings, TestValidArgumentsFunc<float2> TestValidArguments, SetConstraintsFunc SetConstraints, SetArgumentsFunc<float2> SetArguments);
-template void Tuner<double2>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<double2> GetTunerSettings, TestValidArgumentsFunc<double2> TestValidArguments, SetConstraintsFunc SetConstraints, SetArgumentsFunc<double2> SetArguments);
+template void Tuner<half>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<half> GetTunerSettings, TestValidArgumentsFunc<half> TestValidArguments, SetConstraintsFunc SetConstraints, ComputeLocalMemSizeFunc<half> ComputeLocalMemSize, SetArgumentsFunc<half> SetArguments);
+template void Tuner<float>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<float> GetTunerSettings, TestValidArgumentsFunc<float> TestValidArguments, SetConstraintsFunc SetConstraints, ComputeLocalMemSizeFunc<float> ComputeLocalMemSize, SetArgumentsFunc<float> SetArguments);
+template void Tuner<double>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<double> GetTunerSettings, TestValidArgumentsFunc<double> TestValidArguments, SetConstraintsFunc SetConstraints, ComputeLocalMemSizeFunc<double> ComputeLocalMemSize, SetArgumentsFunc<double> SetArguments);
+template void Tuner<float2>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<float2> GetTunerSettings, TestValidArgumentsFunc<float2> TestValidArguments, SetConstraintsFunc SetConstraints, ComputeLocalMemSizeFunc<float2> ComputeLocalMemSize, SetArgumentsFunc<float2> SetArguments);
+template void Tuner<double2>(int argc, char* argv[], const int V, GetTunerDefaultsFunc GetTunerDefaults, GetTunerSettingsFunc<double2> GetTunerSettings, TestValidArgumentsFunc<double2> TestValidArguments, SetConstraintsFunc SetConstraints, ComputeLocalMemSizeFunc<double2> ComputeLocalMemSize, SetArgumentsFunc<double2> SetArguments);
// =================================================================================================
} // namespace clblast
diff --git a/src/tuning/tuning.hpp b/src/tuning/tuning.hpp
index ee7e0087..37a042ff 100644
--- a/src/tuning/tuning.hpp
+++ b/src/tuning/tuning.hpp
@@ -108,6 +108,8 @@ template <typename T>
using TestValidArgumentsFunc = std::function<void(const int V, const Arguments<T> &args)>;
using SetConstraintsFunc = std::function<std::vector<Constraint>(const int V)>;
template <typename T>
+using ComputeLocalMemSizeFunc = std::function<LocalMemSizeInfo(const int V)>;
+template <typename T>
using SetArgumentsFunc = std::function<void(const int V, Kernel &kernel, const Arguments<T> &args, std::vector<Buffer<T>>& buffers)>;
// Function to get command-line argument, set-up the input buffers, configure the tuner, and collect
@@ -119,8 +121,20 @@ void Tuner(int argc, char* argv[], const int V,
GetTunerSettingsFunc<T> GetTunerSettings,
TestValidArgumentsFunc<T> TestValidArguments,
SetConstraintsFunc SetConstraints,
+ ComputeLocalMemSizeFunc<T> ComputeLocalMemSize,
SetArgumentsFunc<T> SetArguments);
+// Function to run the tuners through the CLBlast API, no I/O
+template <typename T>
+StatusCode TunerAPI(Queue &queue, const Arguments<T> &args, const int V,
+ const GetTunerDefaultsFunc GetTunerDefaults,
+ const GetTunerSettingsFunc<T> GetTunerSettings,
+ const TestValidArgumentsFunc<T> TestValidArguments,
+ const SetConstraintsFunc SetConstraints,
+ const ComputeLocalMemSizeFunc<T> ComputeLocalMemSize,
+ const SetArgumentsFunc<T> SetArguments,
+ std::unordered_map<std::string,size_t> &parameters);
+
// =================================================================================================
} // namespace clblast
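Editor's note: the per-kernel entry points built on TunerAPI are instantiated in the new tuning_api.cpp below and exposed through include/clblast.h elsewhere in this patch, so they can be called directly from client code. A minimal usage sketch, assuming clblast.h declares TuneXgemm with the command-queue-pointer signature instantiated below and pulls in the OpenCL headers; the sizes and the 0.5 fraction are placeholder values:

// Usage sketch only, under the assumptions stated above.
#include <string>
#include <unordered_map>
#include <clblast.h>

void TuneSgemmExample(cl_command_queue queue) {
  auto parameters = std::unordered_map<std::string, size_t>();
  const auto fraction = 0.5;  // explore a random half of all configurations
  const auto status = clblast::TuneXgemm<float>(&queue, 512, 512, 512, fraction, parameters);
  if (status == clblast::StatusCode::kSuccess) {
    // 'parameters' now maps parameter names (e.g. "MWG") to the best-found values
  }
}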
diff --git a/src/tuning/tuning_api.cpp b/src/tuning/tuning_api.cpp
new file mode 100644
index 00000000..f37b3600
--- /dev/null
+++ b/src/tuning/tuning_api.cpp
@@ -0,0 +1,387 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the tuner API: it makes the per-kernel tuners callable from the CLBlast
+// library itself, without any I/O, and returns the best-found parameters to the caller.
+//
+// =================================================================================================
+
+#include <vector>
+#include <string>
+#include <random>
+#include <utility>
+#include <algorithm>
+
+#include "tuning/tuning.hpp"
+#include "tuning/kernels/xaxpy.hpp"
+#include "tuning/kernels/xdot.hpp"
+#include "tuning/kernels/xgemv.hpp"
+#include "tuning/kernels/xger.hpp"
+#include "tuning/kernels/xgemm.hpp"
+#include "tuning/kernels/xgemm_direct.hpp"
+#include "tuning/kernels/copy_fast.hpp"
+#include "tuning/kernels/copy_pad.hpp"
+#include "tuning/kernels/transpose_fast.hpp"
+#include "tuning/kernels/transpose_pad.hpp"
+#include "tuning/kernels/invert.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+template <typename T>
+StatusCode TuneXaxpy(RawCommandQueue * queue, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.n = n;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 0, XaxpyGetTunerDefaults, XaxpyGetTunerSettings<T>,
+ XaxpyTestValidArguments<T>, XaxpySetConstraints, XaxpyComputeLocalMemSize<T>, XaxpySetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneXaxpy<half>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXaxpy<float>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXaxpy<double>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXaxpy<float2>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXaxpy<double2>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TuneXdot(RawCommandQueue * queue, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.n = n;
+ auto queue_cpp = Queue(*queue);
+ auto status = TunerAPI<T>(queue_cpp, args, 1, XdotGetTunerDefaults, XdotGetTunerSettings<T>,
+ XdotTestValidArguments<T>, XdotSetConstraints, XdotComputeLocalMemSize<T>, XdotSetArguments<T>, parameters);
+ if (status != StatusCode::kSuccess) { return status; }
+ return TunerAPI<T>(queue_cpp, args, 2, XdotGetTunerDefaults, XdotGetTunerSettings<T>,
+ XdotTestValidArguments<T>, XdotSetConstraints, XdotComputeLocalMemSize<T>, XdotSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneXdot<half>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXdot<float>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXdot<double>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXdot<float2>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXdot<double2>(RawCommandQueue*, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TuneXgemv(RawCommandQueue * queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n;
+ auto queue_cpp = Queue(*queue);
+ auto status = TunerAPI<T>(queue_cpp, args, 1, XgemvGetTunerDefaults, XgemvGetTunerSettings<T>,
+ XgemvTestValidArguments<T>, XgemvSetConstraints, XgemvComputeLocalMemSize<T>, XgemvSetArguments<T>, parameters);
+ if (status != StatusCode::kSuccess) { return status; }
+ status = TunerAPI<T>(queue_cpp, args, 2, XgemvGetTunerDefaults, XgemvGetTunerSettings<T>,
+ XgemvTestValidArguments<T>, XgemvSetConstraints, XgemvComputeLocalMemSize<T>, XgemvSetArguments<T>, parameters);
+ if (status != StatusCode::kSuccess) { return status; }
+ return TunerAPI<T>(queue_cpp, args, 3, XgemvGetTunerDefaults, XgemvGetTunerSettings<T>,
+ XgemvTestValidArguments<T>, XgemvSetConstraints, XgemvComputeLocalMemSize<T>, XgemvSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneXgemv<half>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemv<float>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemv<double>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemv<float2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemv<double2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TuneXger(RawCommandQueue * queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 0, XgerGetTunerDefaults, XgerGetTunerSettings<T>,
+ XgerTestValidArguments<T>, XgerSetConstraints, XgerComputeLocalMemSize<T>, XgerSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneXger<half>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXger<float>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXger<double>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXger<float2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXger<double2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TuneXgemm(RawCommandQueue * queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n; args.k = k;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 2, XgemmGetTunerDefaults, XgemmGetTunerSettings<T>,
+ XgemmTestValidArguments<T>, XgemmSetConstraints, XgemmComputeLocalMemSize<T>, XgemmSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneXgemm<half>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemm<float>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemm<double>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemm<float2>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemm<double2>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TuneXgemmDirect(RawCommandQueue * queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n; args.k = k;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 2, XgemmDirectGetTunerDefaults, XgemmDirectGetTunerSettings<T>,
+ XgemmDirectTestValidArguments<T>, XgemmDirectSetConstraints, XgemmDirectComputeLocalMemSize<T>, XgemmDirectSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneXgemmDirect<half>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemmDirect<float>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemmDirect<double>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemmDirect<float2>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneXgemmDirect<double2>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TuneCopy(RawCommandQueue * queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 0, CopyGetTunerDefaults, CopyGetTunerSettings<T>,
+ CopyTestValidArguments<T>, CopySetConstraints, CopyComputeLocalMemSize<T>, CopySetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneCopy<half>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneCopy<float>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneCopy<double>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneCopy<float2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneCopy<double2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TunePad(RawCommandQueue * queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 0, PadGetTunerDefaults, PadGetTunerSettings<T>,
+ PadTestValidArguments<T>, PadSetConstraints, PadComputeLocalMemSize<T>, PadSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TunePad<half>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TunePad<float>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TunePad<double>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TunePad<float2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TunePad<double2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TuneTranspose(RawCommandQueue * queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 0, TransposeGetTunerDefaults, TransposeGetTunerSettings<T>,
+ TransposeTestValidArguments<T>, TransposeSetConstraints, TransposeComputeLocalMemSize<T>, TransposeSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneTranspose<half>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneTranspose<float>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneTranspose<double>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneTranspose<float2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneTranspose<double2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TunePadtranspose(RawCommandQueue * queue, const size_t m, const size_t n,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 0, PadtransposeGetTunerDefaults, PadtransposeGetTunerSettings<T>,
+ PadtransposeTestValidArguments<T>, PadtransposeSetConstraints, PadtransposeComputeLocalMemSize<T>, PadtransposeSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TunePadtranspose<half>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TunePadtranspose<float>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TunePadtranspose<double>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TunePadtranspose<float2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TunePadtranspose<double2>(RawCommandQueue*, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+template <typename T>
+StatusCode TuneInvert(RawCommandQueue * queue, const size_t m, const size_t n, const size_t k,
+ const double fraction, std::unordered_map<std::string,size_t> &parameters) {
+ auto args = Arguments<T>(); args.fraction = fraction; args.m = m; args.n = n; args.k = k;
+ auto queue_cpp = Queue(*queue);
+ return TunerAPI<T>(queue_cpp, args, 0, InvertGetTunerDefaults, InvertGetTunerSettings<T>,
+ InvertTestValidArguments<T>, InvertSetConstraints, InvertComputeLocalMemSize<T>, InvertSetArguments<T>, parameters);
+}
+template StatusCode PUBLIC_API TuneInvert<half>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneInvert<float>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneInvert<double>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneInvert<float2>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+template StatusCode PUBLIC_API TuneInvert<double2>(RawCommandQueue*, const size_t, const size_t, const size_t, const double, std::unordered_map<std::string,size_t>&);
+
+// =================================================================================================
+
+// The main tuner API, similar to the one in tuning.cpp, but without I/O
+template <typename T>
+StatusCode TunerAPI(Queue &queue, const Arguments<T> &args, const int V,
+ const GetTunerDefaultsFunc GetTunerDefaults,
+ const GetTunerSettingsFunc<T> GetTunerSettings,
+ const TestValidArgumentsFunc<T> TestValidArguments,
+ const SetConstraintsFunc SetConstraints,
+ const ComputeLocalMemSizeFunc<T> ComputeLocalMemSize,
+ const SetArgumentsFunc<T> SetArguments,
+ std::unordered_map<std::string,size_t> &parameters) {
+
+  // Retrieves the tuner defaults and settings for this kernel variation and precision
+ const TunerDefaults defaults = GetTunerDefaults(V);
+ const TunerSettings settings = GetTunerSettings(V, args);
+
+ // Tests validity of the given arguments
+ TestValidArguments(V, args);
+
+ // Retrieves OpenCL classes
+ const auto device = queue.GetDevice();
+ const auto context = queue.GetContext();
+
+ // Inspects whether or not FP64 is supported in case of double precision
+ if ((PrecisionValue<T>() == Precision::kDouble && !PrecisionSupported<double>(device)) ||
+ (PrecisionValue<T>() == Precision::kComplexDouble && !PrecisionSupported<double2>(device))) {
+ return StatusCode::kNoDoublePrecision;
+ }
+
+ // As above, but for FP16 (half precision)
+ if (PrecisionValue<T>() == Precision::kHalf && !PrecisionSupported<half>(device)) {
+ return StatusCode::kNoHalfPrecision;
+ }
+
+ // Retrieves properties
+ const auto device_type = GetDeviceType(device);
+ const auto device_vendor = GetDeviceVendor(device);
+ const auto device_architecture = GetDeviceArchitecture(device);
+ const auto device_name = GetDeviceName(device);
+
+ // Creates input buffers with random data
+ const auto buffer_sizes = std::vector<size_t>{
+ settings.size_x, settings.size_y,
+ settings.size_a, settings.size_b, settings.size_c,
+ settings.size_temp
+ };
+ const auto seed = static_cast<unsigned long>(time(nullptr));
+ std::mt19937 mt(seed);
+ std::uniform_real_distribution<double> dist(kTestDataLowerLimit, kTestDataUpperLimit);
+ auto source_buffers = std::vector<std::vector<T>>();
+ auto reference_buffers = std::vector<std::vector<T>>();
+ auto result_buffers = std::vector<std::vector<T>>();
+ auto device_buffers = std::vector<Buffer<T>>();
+ for (const auto size : buffer_sizes) {
+ auto host_buffer = std::vector<T>(size);
+ PopulateVector(host_buffer, mt, dist);
+ source_buffers.push_back(host_buffer);
+ reference_buffers.push_back(std::vector<T>(size));
+ result_buffers.push_back(std::vector<T>(size));
+ device_buffers.push_back(Buffer<T>(context, size));
+ }
+
+ // Sets the tunable parameters and their possible values
+ auto configurations = SetConfigurations(device, settings.parameters,
+ SetConstraints(V), ComputeLocalMemSize(V));
+
+  // Selects the search method: full search or a random fraction of the configurations
+ if (args.fraction != 0.0 && args.fraction != 1.0) {
+ const auto new_size = static_cast<size_t>(configurations.size() * args.fraction);
+ auto rng = std::default_random_engine{};
+ std::shuffle(std::begin(configurations), std::end(configurations), rng);
+ configurations.resize(new_size);
+ }
+
+ // First runs a reference example to compare against
+ try {
+
+ // Sets the input
+ for (const auto id : settings.inputs) {
+ device_buffers[id].Write(queue, buffer_sizes[id], source_buffers[id]);
+ }
+
+ // Compiles the kernel
+ auto compiler_options = std::vector<std::string>();
+ const auto program = CompileFromSource(settings.sources, args.precision, settings.kernel_name,
+ device, context, compiler_options, 0);
+ auto kernel = Kernel(program, settings.kernel_name);
+ SetArguments(V, kernel, args, device_buffers);
+
+ // Runs the kernel
+ const auto time_ms = TimeKernel(args.num_runs, kernel, queue, device,
+ settings.global_size_ref, settings.local_size_ref, true);
+ if (time_ms == -1.0) { throw std::runtime_error("Error in reference implementation"); }
+
+ // Saves the result
+ for (const auto id : settings.outputs) {
+ device_buffers[id].Read(queue, buffer_sizes[id], reference_buffers[id]);
+ }
+ }
+ catch (...) {
+ const auto status_code = DispatchExceptionCatchAll(true);
+ return status_code;
+ }
+
+ // Starts the tuning process
+ auto results = std::vector<TuningResult>();
+ for (auto config_id = size_t{0}; config_id < configurations.size(); ++config_id) {
+ try {
+ auto configuration = configurations[config_id];
+
+ // Sets the input
+ for (const auto id : settings.inputs) {
+ device_buffers[id].Write(queue, buffer_sizes[id], source_buffers[id]);
+ }
+
+ // Sets the thread configuration
+ const auto global = SetThreadConfiguration(configuration, settings.global_size,
+ settings.mul_global, settings.div_global);
+ const auto local = SetThreadConfiguration(configuration, settings.local_size,
+ settings.mul_local, settings.div_local);
+
+ // Sets the parameters for this configuration
+ auto kernel_source = std::string{""};
+ for (const auto &parameter : configuration) {
+ kernel_source += "#define " + parameter.first + " " + ToString(parameter.second) + "\n";
+ }
+ kernel_source += settings.sources;
+
+ // Compiles the kernel
+ auto compiler_options = std::vector<std::string>();
+ const auto program = CompileFromSource(kernel_source, args.precision, settings.kernel_name,
+ device, context, compiler_options, 0, true);
+ auto kernel = Kernel(program, settings.kernel_name);
+
+ // Runs the kernel
+ SetArguments(V, kernel, args, device_buffers);
+ const auto time_ms = TimeKernel(args.num_runs, kernel, queue, device, global, local, true);
+
+ // Kernel run was not successful
+ if (time_ms == -1.0) {
+ continue;
+ }
+
+ // Compares the results
+ auto l2_error = 0.0;
+ for (const auto id : settings.outputs) {
+ device_buffers[id].Read(queue, buffer_sizes[id], result_buffers[id]);
+ for (auto index = size_t{0}; index<buffer_sizes[id]; ++index) {
+ const auto diff = SquaredDifference(result_buffers[id][index], reference_buffers[id][index]);
+ l2_error += diff;
+ }
+ l2_error /= static_cast<double>(buffer_sizes[id]);
+ if (std::isnan(l2_error) || l2_error > 1.0e-4) {
+ throw std::runtime_error("L2 error too large");
+ }
+ }
+ results.push_back(TuningResult{settings.kernel_name, time_ms, configuration});
+ }
+ catch (...) {
+ }
+ }
+
+ // Completed the tuning process
+ if (results.size() == 0) { return StatusCode::kUnexpectedError; }
+
+ // Computes the best results
+ auto comparison = [](const TuningResult& lhs, const TuningResult& rhs) { return lhs.score < rhs.score; };
+ const auto best_configuration = std::min_element(results.begin(), results.end(), comparison);
+ const auto best_time_ms = best_configuration->score;
+ if (best_time_ms == 0.0) { return StatusCode::kUnexpectedError; }
+
+ // Stores the best parameters
+ for (const auto config : best_configuration->config) {
+ parameters[config.first] = config.second;
+ }
+ return StatusCode::kSuccess;
+}
+
+// Compiles the above function
+template StatusCode TunerAPI<half>(Queue &queue, const Arguments<half> &args, const int V, const GetTunerDefaultsFunc GetTunerDefaults, const GetTunerSettingsFunc<half> GetTunerSettings, const TestValidArgumentsFunc<half> TestValidArguments, const SetConstraintsFunc SetConstraints, const ComputeLocalMemSizeFunc<half> ComputeLocalMemSize, const SetArgumentsFunc<half> SetArguments, std::unordered_map<std::string,size_t>&);
+template StatusCode TunerAPI<float>(Queue &queue, const Arguments<float> &args, const int V, const GetTunerDefaultsFunc GetTunerDefaults, const GetTunerSettingsFunc<float> GetTunerSettings, const TestValidArgumentsFunc<float> TestValidArguments, const SetConstraintsFunc SetConstraints, const ComputeLocalMemSizeFunc<float> ComputeLocalMemSize, const SetArgumentsFunc<float> SetArguments, std::unordered_map<std::string,size_t>&);
+template StatusCode TunerAPI<double>(Queue &queue, const Arguments<double> &args, const int V, const GetTunerDefaultsFunc GetTunerDefaults, const GetTunerSettingsFunc<double> GetTunerSettings, const TestValidArgumentsFunc<double> TestValidArguments, const SetConstraintsFunc SetConstraints, const ComputeLocalMemSizeFunc<double> ComputeLocalMemSize, const SetArgumentsFunc<double> SetArguments, std::unordered_map<std::string,size_t>&);
+template StatusCode TunerAPI<float2>(Queue &queue, const Arguments<float2> &args, const int V, const GetTunerDefaultsFunc GetTunerDefaults, const GetTunerSettingsFunc<float2> GetTunerSettings, const TestValidArgumentsFunc<float2> TestValidArguments, const SetConstraintsFunc SetConstraints, const ComputeLocalMemSizeFunc<float2> ComputeLocalMemSize, const SetArgumentsFunc<float2> SetArguments, std::unordered_map<std::string,size_t>&);
+template StatusCode TunerAPI<double2>(Queue &queue, const Arguments<double2> &args, const int V, const GetTunerDefaultsFunc GetTunerDefaults, const GetTunerSettingsFunc<double2> GetTunerSettings, const TestValidArgumentsFunc<double2> TestValidArguments, const SetConstraintsFunc SetConstraints, const ComputeLocalMemSizeFunc<double2> ComputeLocalMemSize, const SetArgumentsFunc<double2> SetArguments, std::unordered_map<std::string,size_t>&);
+
+// =================================================================================================
+} // namespace clblast
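Editor's note: the parameters map filled in by these tuners only takes effect once it is applied to the library. A natural follow-up, in the spirit of the samples/tuning_api.cpp sample added in this patch, is to pass it to the existing OverrideParameters entry point so that subsequent routine calls pick up the tuned values. A hedged sketch, assuming clblast::OverrideParameters keeps its (device, kernel name, precision, parameters) signature and that the GEMM kernel's database name is "Xgemm":

// Sketch only, under the assumptions stated above.
#include <string>
#include <unordered_map>
#include <clblast.h>

clblast::StatusCode ApplyTunedSgemm(cl_device_id device,
                                    const std::unordered_map<std::string, size_t> &parameters) {
  return clblast::OverrideParameters(device, "Xgemm", clblast::Precision::kSingle, parameters);
}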
diff --git a/src/utilities/timing.cpp b/src/utilities/timing.cpp
index af6a8ff2..1afb0d08 100644
--- a/src/utilities/timing.cpp
+++ b/src/utilities/timing.cpp
@@ -62,15 +62,16 @@ double RunKernelTimed(const size_t num_runs, Kernel &kernel, Queue &queue, const
}
double TimeKernel(const size_t num_runs, Kernel &kernel, Queue &queue, const Device &device,
- std::vector<size_t> global, const std::vector<size_t> &local) {
+ std::vector<size_t> global, const std::vector<size_t> &local,
+ const bool silent) {
try {
const auto time_ms = RunKernelTimed(num_runs, kernel, queue, device, global, local);
- printf(" %9.2lf ms |", time_ms);
+ if (!silent) { printf(" %9.2lf ms |", time_ms); }
return time_ms;
}
catch (...) {
const auto status_code = DispatchExceptionCatchAll(true);
- printf(" error %-5d |", static_cast<int>(status_code));
+ if (!silent) { printf(" error %-5d |", static_cast<int>(status_code)); }
return -1.0; // invalid
}
}
diff --git a/src/utilities/timing.hpp b/src/utilities/timing.hpp
index c167cd5f..7761fd83 100644
--- a/src/utilities/timing.hpp
+++ b/src/utilities/timing.hpp
@@ -44,7 +44,8 @@ double RunKernelTimed(const size_t num_runs, Kernel &kernel, Queue &queue, const
std::vector<size_t> global, const std::vector<size_t> &local);
double TimeKernel(const size_t num_runs, Kernel &kernel, Queue &queue, const Device &device,
- std::vector<size_t> global, const std::vector<size_t> &local);
+ std::vector<size_t> global, const std::vector<size_t> &local,
+ const bool silent = false);
// =================================================================================================
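Editor's note: the new defaulted silent flag lets the I/O-free tuner API reuse TimeKernel without the table output printed by the command-line tuners. A small sketch of the silent call style, assuming the internal "utilities/timing.hpp" header and an already-prepared kernel, queue, device and thread configuration:

// Sketch only, under the assumptions stated above.
#include <vector>
#include "utilities/timing.hpp"

double TimeQuietly(const size_t num_runs, clblast::Kernel &kernel, clblast::Queue &queue,
                   const clblast::Device &device, const std::vector<size_t> &global,
                   const std::vector<size_t> &local) {
  // The command-line tuners omit the last argument and keep the printed timing cell;
  // the tuner API passes 'true' to suppress output and just uses the time (or -1.0 on error).
  return clblast::TimeKernel(num_runs, kernel, queue, device, global, local, true);
}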
diff --git a/test/correctness/misc/preprocessor.cpp b/test/correctness/misc/preprocessor.cpp
index 7944e868..c5d115d3 100644
--- a/test/correctness/misc/preprocessor.cpp
+++ b/test/correctness/misc/preprocessor.cpp
@@ -245,12 +245,14 @@ size_t RunPreprocessor(int argc, char *argv[], const bool silent, const Precisio
if (TestKernel(device, context, "XgemmDirectTN", gemm_direct_sources, precision)) { passed++; } else { errors++; }
// HEMM
- const auto herm_sources =
- "#define ROUTINE_HEMM\n"
- #include "../src/kernels/level3/level3.opencl"
- #include "../src/kernels/level3/convert_hermitian.opencl"
- ;
- if (TestKernel(device, context, "HermLowerToSquared", herm_sources, precision)) { passed++; } else { errors++; }
+ if (precision == Precision::kComplexSingle || precision == Precision::kComplexDouble) {
+ const auto herm_sources =
+ "#define ROUTINE_HEMM\n"
+ #include "../src/kernels/level3/level3.opencl"
+ #include "../src/kernels/level3/convert_hermitian.opencl"
+ ;
+ if (TestKernel(device, context, "HermLowerToSquared", herm_sources, precision)) { passed++; } else { errors++; }
+ }
// Prints and returns the statistics
std::cout << std::endl;