author     CNugteren <web@cedricnugteren.nl>    2015-05-30 12:30:43 +0200
committer  CNugteren <web@cedricnugteren.nl>    2015-05-30 12:30:43 +0200
commit     bc5a341dfe591946e925db315fc7d8c0c25c2938 (patch)
tree       b216ab5eee4863e3807d92b5ddd19fa22197ed22 /src/tuning
parent     c7b054ea6747039f4405fd93da6e924f3e5c7f4b (diff)
Initial commit of preview version
Diffstat (limited to 'src/tuning')
-rw-r--r--  src/tuning/copy.cc            83
-rw-r--r--  src/tuning/pad.cc             90
-rw-r--r--  src/tuning/padtranspose.cc    95
-rw-r--r--  src/tuning/transpose.cc       88
-rw-r--r--  src/tuning/tuning.cc         186
-rw-r--r--  src/tuning/xaxpy.cc           88
-rw-r--r--  src/tuning/xgemm.cc          126
7 files changed, 756 insertions, 0 deletions
diff --git a/src/tuning/copy.cc b/src/tuning/copy.cc
new file mode 100644
index 00000000..da223bf0
--- /dev/null
+++ b/src/tuning/copy.cc
@@ -0,0 +1,83 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements an auto-tuner to tune the copy OpenCL kernels. It uses CLTune.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+#include <stdexcept>
+
+#include "internal/utilities.h"
+#include "internal/tuning.h"
+
+namespace clblast {
+// =================================================================================================
+
+// The copy auto-tuner
+template <typename T>
+void CopyTune(const Arguments<T> &args,
+ const std::vector<T> &a_mat, std::vector<T> &b_mat,
+ cltune::Tuner &tuner) {
+
+ // This points to the CopyMatrix kernel as found in the CLBlast library. This is just one example
+ // of a copy kernel. However, all copy-kernels use the same tuning parameters, so one has to be
+ // chosen as a representative.
+ std::string common_source =
+ #include "../src/kernels/common.opencl"
+ std::string kernel_source =
+ #include "../src/kernels/copy.opencl"
+ auto sources = common_source + kernel_source;
+ auto id = tuner.AddKernelFromString(sources, "CopyMatrix", {args.m, args.n}, {1, 1});
+ tuner.SetReferenceFromString(sources, "CopyMatrix", {args.m, args.n}, {8, 8});
+
+ // Sets the tunable parameters and their possible values
+ tuner.AddParameter(id, "COPY_DIMX", {8, 16, 32});
+ tuner.AddParameter(id, "COPY_DIMY", {8, 16, 32});
+ tuner.AddParameter(id, "COPY_WPT", {1, 2, 4, 8});
+ tuner.AddParameter(id, "COPY_VW", {1, 2, 4, 8});
+
+ // Tests for a specific precision
+ tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
+ tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
+
+ // Modifies the thread-sizes (both global and local) based on the parameters
+ tuner.MulLocalSize(id, {"COPY_DIMX", "COPY_DIMY"});
+ tuner.DivGlobalSize(id, {"COPY_VW", "COPY_WPT"});
+
+ // Sets the function's arguments
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentInput(a_mat);
+ tuner.AddArgumentOutput(b_mat);
+}
+
+// =================================================================================================
+
+// Main function which calls the common client code with the routine-specific function as argument.
+void TunerCopy(int argc, char *argv[]) {
+ switch(GetPrecision(argc, argv)) {
+ case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
+ case Precision::kSingle: TunerAB<float>(argc, argv, CopyTune<float>); break;
+ case Precision::kDouble: TunerAB<double>(argc, argv, CopyTune<double>); break;
+ case Precision::kComplexSingle: TunerAB<float2>(argc, argv, CopyTune<float2>); break;
+ case Precision::kComplexDouble: TunerAB<double2>(argc, argv, CopyTune<double2>); break;
+ }
+}
+
+// =================================================================================================
+} // namespace clblast
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+ clblast::TunerCopy(argc, argv);
+ return 0;
+}
+
+// =================================================================================================
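The thread-size modifiers in CopyTune boil down to simple arithmetic on the launch configuration:
the global size starts at {m, n} with a unit local size, MulLocalSize scales the local size by
{COPY_DIMX, COPY_DIMY}, and DivGlobalSize shrinks the global size by {COPY_VW, COPY_WPT} because
each thread then handles several elements. A standalone sketch with hypothetical parameter values
(not taken from this commit):

#include <cstddef>
#include <cstdio>

// Minimal sketch of the resulting launch configuration for the CopyMatrix kernel.
int main() {
  const size_t m = 1024, n = 1024;   // matrix dimensions (illustrative)
  const size_t dimx = 16, dimy = 8;  // COPY_DIMX, COPY_DIMY: one combination from the search space
  const size_t vw = 4, wpt = 2;      // COPY_VW, COPY_WPT
  std::printf("global = {%zu, %zu}, local = {%zu, %zu}\n", m / vw, n / wpt, dimx, dimy);
  return 0;
}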
diff --git a/src/tuning/pad.cc b/src/tuning/pad.cc
new file mode 100644
index 00000000..93312df2
--- /dev/null
+++ b/src/tuning/pad.cc
@@ -0,0 +1,90 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements an auto-tuner to tune the pad-copy OpenCL kernels. It uses CLTune.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+#include <stdexcept>
+
+#include "internal/utilities.h"
+#include "internal/tuning.h"
+
+namespace clblast {
+// =================================================================================================
+
+// The pad auto-tuner
+template <typename T>
+void PadTune(const Arguments<T> &args,
+ const std::vector<T> &a_mat, std::vector<T> &b_mat,
+ cltune::Tuner &tuner) {
+
+ // This points to the PadMatrix kernel as found in the CLBlast library. This is just one
+ // example of a pad kernel. However, all pad-kernels use the same tuning parameters, so one has
+ // to be chosen as a representative.
+ std::string common_source =
+ #include "../src/kernels/common.opencl"
+ std::string kernel_source =
+ #include "../src/kernels/pad.opencl"
+ auto sources = common_source + kernel_source;
+ auto id = tuner.AddKernelFromString(sources, "PadMatrix", {args.m, args.n}, {1, 1});
+ tuner.SetReferenceFromString(sources, "PadMatrix", {args.m, args.n}, {8, 8});
+
+ // Sets the tunable parameters and their possible values
+ tuner.AddParameter(id, "PAD_DIMX", {8, 16, 32});
+ tuner.AddParameter(id, "PAD_DIMY", {8, 16, 32});
+ tuner.AddParameter(id, "PAD_WPTX", {1, 2, 4});
+ tuner.AddParameter(id, "PAD_WPTY", {1, 2, 4});
+
+ // Tests for a specific precision
+ tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
+ tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
+
+ // Modifies the thread-sizes (both global and local) based on the parameters
+ tuner.MulLocalSize(id, {"PAD_DIMX", "PAD_DIMY"});
+ tuner.DivGlobalSize(id, {"PAD_WPTX", "PAD_WPTY"});
+
+ // Sets the function's arguments
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentScalar(static_cast<int>(args.n));
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentScalar(0);
+ tuner.AddArgumentInput(a_mat);
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentScalar(static_cast<int>(args.n));
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentScalar(0);
+ tuner.AddArgumentOutput(b_mat);
+}
+
+// =================================================================================================
+
+// Main function which calls the common client code with the routine-specific function as argument.
+void TunerPad(int argc, char *argv[]) {
+ switch(GetPrecision(argc, argv)) {
+ case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
+ case Precision::kSingle: TunerAB<float>(argc, argv, PadTune<float>); break;
+ case Precision::kDouble: TunerAB<double>(argc, argv, PadTune<double>); break;
+ case Precision::kComplexSingle: TunerAB<float2>(argc, argv, PadTune<float2>); break;
+ case Precision::kComplexDouble: TunerAB<double2>(argc, argv, PadTune<double2>); break;
+ }
+}
+
+// =================================================================================================
+} // namespace clblast
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+ clblast::TunerPad(argc, argv);
+ return 0;
+}
+
+// =================================================================================================
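The ten argument-setting calls in PadTune follow a fixed per-matrix pattern: two dimensions, a
leading dimension, an offset, and finally the buffer itself (first the source, then the
destination). A hypothetical helper could group them as sketched below; the helper name is
invented for illustration, and only the CLTune calls already used above are assumed, plus the
location of CLTune's public header.

#include <cstddef>
#include <vector>
#include <cltune.h>  // assumed location of CLTune's public header

// Hypothetical helper grouping the per-matrix arguments set in PadTune: first dimension,
// second dimension, leading dimension, offset, then the buffer.
template <typename T>
void AddMatrixArgs(cltune::Tuner &tuner, size_t one, size_t two, size_t ld, size_t offset,
                   const std::vector<T> &buffer, bool is_output) {
  tuner.AddArgumentScalar(static_cast<int>(one));
  tuner.AddArgumentScalar(static_cast<int>(two));
  tuner.AddArgumentScalar(static_cast<int>(ld));
  tuner.AddArgumentScalar(static_cast<int>(offset));
  if (is_output) { tuner.AddArgumentOutput(buffer); }
  else { tuner.AddArgumentInput(buffer); }
}

// Usage equivalent to the calls in PadTune: an m-by-n source with leading dimension m and zero
// offset, followed by a destination with the same layout.
//   AddMatrixArgs(tuner, args.m, args.n, args.m, size_t{0}, a_mat, false);
//   AddMatrixArgs(tuner, args.m, args.n, args.m, size_t{0}, b_mat, true);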
diff --git a/src/tuning/padtranspose.cc b/src/tuning/padtranspose.cc
new file mode 100644
index 00000000..b2af9925
--- /dev/null
+++ b/src/tuning/padtranspose.cc
@@ -0,0 +1,95 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements an auto-tuner to tune the pad-transpose OpenCL kernels. It uses CLTune.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+#include <stdexcept>
+
+#include "internal/utilities.h"
+#include "internal/tuning.h"
+
+namespace clblast {
+// =================================================================================================
+
+// The pad-transpose auto-tuner
+template <typename T>
+void PadTransposeTune(const Arguments<T> &args,
+ const std::vector<T> &a_mat, std::vector<T> &b_mat,
+ cltune::Tuner &tuner) {
+
+ // This points to the PadTransposeMatrix kernel as found in the CLBlast library. This is just one
+ // example of a pad-transpose kernel. However, all kernels use the same tuning parameters, so one
+ // to be chosen as a representative.
+ std::string common_source =
+ #include "../src/kernels/common.opencl"
+ std::string kernel_source =
+ #include "../src/kernels/padtranspose.opencl"
+ auto sources = common_source + kernel_source;
+ auto id = tuner.AddKernelFromString(sources, "PadTransposeMatrix", {args.m, args.n}, {1, 1});
+ tuner.SetReferenceFromString(sources, "PadTransposeMatrix", {args.m, args.n}, {8, 8});
+
+ // Sets the tunable parameters and their possible values
+ tuner.AddParameter(id, "PADTRA_TILE", {8, 16, 32, 64});
+ tuner.AddParameter(id, "PADTRA_WPT", {1, 2, 4, 8, 16});
+ tuner.AddParameter(id, "PADTRA_PAD", {0, 1});
+
+ // Tests for a specific precision
+ tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
+ tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
+
+ // Sets the constraints for local memory size limitations
+ auto LocalMemorySize = [args] (std::vector<size_t> v) {
+ return ((v[0]*v[1]*(v[0]*v[1]+v[2]))*GetBytes(args.precision));
+ };
+ tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"PADTRA_TILE", "PADTRA_WPT", "PADTRA_PAD"});
+
+ // Modifies the thread-sizes (both global and local) based on the parameters
+ tuner.DivGlobalSize(id, {"PADTRA_WPT", "PADTRA_WPT"});
+ tuner.MulLocalSize(id, {"PADTRA_TILE", "PADTRA_TILE"});
+
+ // Sets the function's arguments
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentScalar(static_cast<int>(args.n));
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentScalar(0);
+ tuner.AddArgumentInput(a_mat);
+ tuner.AddArgumentScalar(static_cast<int>(args.n));
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentScalar(static_cast<int>(args.n));
+ tuner.AddArgumentScalar(0);
+ tuner.AddArgumentOutput(b_mat);
+}
+
+// =================================================================================================
+
+// Main function which calls the common client code with the routine-specific function as argument.
+void TunerPadTranspose(int argc, char *argv[]) {
+ switch(GetPrecision(argc, argv)) {
+ case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
+ case Precision::kSingle: TunerAB<float>(argc, argv, PadTransposeTune<float>); break;
+ case Precision::kDouble: TunerAB<double>(argc, argv, PadTransposeTune<double>); break;
+ case Precision::kComplexSingle: TunerAB<float2>(argc, argv, PadTransposeTune<float2>); break;
+ case Precision::kComplexDouble: TunerAB<double2>(argc, argv, PadTransposeTune<double2>); break;
+ }
+}
+
+// =================================================================================================
+} // namespace clblast
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+ clblast::TunerPadTranspose(argc, argv);
+ return 0;
+}
+
+// =================================================================================================
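The LocalMemorySize lambda in PadTransposeTune estimates the local-memory footprint of a
configuration as PADTRA_TILE*PADTRA_WPT * (PADTRA_TILE*PADTRA_WPT + PADTRA_PAD) elements, times
the element size; CLTune uses this to discard combinations that exceed the device's limit. A
worked example with illustrative values from the search space:

#include <cstddef>
#include <cstdio>

// Worked example of the local-memory estimate used in PadTransposeTune.
int main() {
  const size_t tile = 32, wpt = 4, pad = 1;  // PADTRA_TILE, PADTRA_WPT, PADTRA_PAD
  const size_t bytes_per_element = 4;        // single precision
  const size_t side = tile * wpt;            // 128 elements per tile side
  const size_t bytes = side * (side + pad) * bytes_per_element;
  std::printf("local memory needed: %zu bytes\n", bytes);  // 66048 bytes: rejected on 32-48 KiB devices
  return 0;
}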
diff --git a/src/tuning/transpose.cc b/src/tuning/transpose.cc
new file mode 100644
index 00000000..90392866
--- /dev/null
+++ b/src/tuning/transpose.cc
@@ -0,0 +1,88 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements an auto-tuner to tune the transpose OpenCL kernels. It uses CLTune.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+#include <stdexcept>
+
+#include "internal/utilities.h"
+#include "internal/tuning.h"
+
+namespace clblast {
+// =================================================================================================
+
+// The transpose auto-tuner
+template <typename T>
+void TransposeTune(const Arguments<T> &args,
+ const std::vector<T> &a_mat, std::vector<T> &b_mat,
+ cltune::Tuner &tuner) {
+
+ // This points to the TransposeMatrix kernel as found in the CLBlast library. This is just one
+ // example of a transpose kernel. However, all kernels use the same tuning parameters, so one has
+ // to be chosen as a representative.
+ std::string common_source =
+ #include "../src/kernels/common.opencl"
+ std::string kernel_source =
+ #include "../src/kernels/transpose.opencl"
+ auto sources = common_source + kernel_source;
+ auto id = tuner.AddKernelFromString(sources, "TransposeMatrix", {args.m, args.n}, {1, 1});
+ tuner.SetReferenceFromString(sources, "TransposeMatrix", {args.m, args.n}, {8, 8});
+
+ // Sets the tunable parameters and their possible values
+ tuner.AddParameter(id, "TRA_DIM", {4, 8, 16, 32, 64});
+ tuner.AddParameter(id, "TRA_WPT", {1, 2, 4, 8, 16});
+ tuner.AddParameter(id, "TRA_PAD", {0, 1});
+
+ // Tests for a specific precision
+ tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
+ tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
+
+ // Sets the constraints for local memory size limitations
+ auto LocalMemorySize = [args] (std::vector<size_t> v) {
+ return ((v[0]*v[1]*(v[0]*v[1]+v[2]))*GetBytes(args.precision));
+ };
+ tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"TRA_DIM", "TRA_WPT", "TRA_PAD"});
+
+ // Modifies the thread-sizes (both global and local) based on the parameters
+ tuner.DivGlobalSize(id, {"TRA_WPT", "TRA_WPT"});
+ tuner.MulLocalSize(id, {"TRA_DIM", "TRA_DIM"});
+
+ // Sets the function's arguments
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentInput(a_mat);
+ tuner.AddArgumentOutput(b_mat);
+}
+
+// =================================================================================================
+
+// Main function which calls the common client code with the routine-specific function as argument.
+void TunerTranspose(int argc, char *argv[]) {
+ switch(GetPrecision(argc, argv)) {
+ case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
+ case Precision::kSingle: TunerAB<float>(argc, argv, TransposeTune<float>); break;
+ case Precision::kDouble: TunerAB<double>(argc, argv, TransposeTune<double>); break;
+ case Precision::kComplexSingle: TunerAB<float2>(argc, argv, TransposeTune<float2>); break;
+ case Precision::kComplexDouble: TunerAB<double2>(argc, argv, TransposeTune<double2>); break;
+ }
+}
+
+// =================================================================================================
+} // namespace clblast
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+ clblast::TunerTranspose(argc, argv);
+ return 0;
+}
+
+// =================================================================================================
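As in the copy tuner, the modifiers in TransposeTune imply that each work-group covers a square
tile with a side of TRA_DIM*TRA_WPT elements: the global size is divided by TRA_WPT in both
dimensions and the local size becomes {TRA_DIM, TRA_DIM}. A minimal illustration with hypothetical
parameter values:

#include <cstddef>
#include <cstdio>

// Sketch of the per-work-group tile implied by TransposeTune's thread-size modifiers.
int main() {
  const size_t m = 1024, n = 1024;         // matrix dimensions (illustrative)
  const size_t tra_dim = 16, tra_wpt = 4;  // one combination from the search space
  const size_t tile = tra_dim * tra_wpt;   // 64 elements per tile side
  std::printf("global = {%zu, %zu}, local = {%zu, %zu}, tile = %zu x %zu\n",
              m / tra_wpt, n / tra_wpt, tra_dim, tra_dim, tile, tile);
  return 0;
}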
diff --git a/src/tuning/tuning.cc b/src/tuning/tuning.cc
new file mode 100644
index 00000000..bb93c053
--- /dev/null
+++ b/src/tuning/tuning.cc
@@ -0,0 +1,186 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the common auto-tuning code to interface with the CLTune library.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+
+#include "internal/utilities.h"
+#include "internal/tuning.h"
+
+namespace clblast {
+// =================================================================================================
+
+// Function to get the command-line arguments, set up the input buffers, configure the tuner, and
+// collect the results. Used for vector-vector routines.
+template <typename T>
+void TunerXY(int argc, char* argv[], const Tuner2<T> &tune_function) {
+
+ // Sets the parameters and platform/device for which to tune (command-line options)
+ auto help = std::string{"* Options given/available:\n"};
+ auto args = Arguments<T>{};
+ args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0});
+ args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0});
+ args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle);
+ args.n = GetArgument(argc, argv, help, kArgN, size_t{4096*1024});
+ args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar<T>());
+ fprintf(stdout, "%s\n", help.c_str());
+
+ // Creates input buffers with random data
+ auto x_vec = std::vector<T>(args.n);
+ auto y_vec = std::vector<T>(args.n);
+ PopulateVector(x_vec);
+ PopulateVector(y_vec);
+
+ // Initializes the tuner for the chosen device
+ cltune::Tuner tuner(args.platform_id, args.device_id);
+
+ // Use full-search to explore all parameter combinations.
+ tuner.UseFullSearch();
+
+ // Configures the tuning parameters (kernel specific)
+ tune_function(args, x_vec, y_vec, tuner);
+
+ // Starts the tuning process
+ tuner.Tune();
+
+ // Prints the results to screen
+ auto time_ms = tuner.PrintToScreen();
+ tuner.PrintFormatted();
+
+ // Also prints the performance of the best-case in terms of GB/s
+ const auto mega_bytes = (3*args.n*GetBytes(args.precision)) * 1.0e-6;
+ if (time_ms != 0.0) {
+ printf("[ -------> ] %.1lf ms or %.1lf GB/s\n", time_ms, mega_bytes/time_ms);
+ }
+}
+
+// Compiles the above function
+template void TunerXY<float>(int, char**, const Tuner2<float>&);
+template void TunerXY<double>(int, char**, const Tuner2<double>&);
+template void TunerXY<float2>(int, char**, const Tuner2<float2>&);
+template void TunerXY<double2>(int, char**, const Tuner2<double2>&);
+
+// =================================================================================================
+
+// Function to get the command-line arguments, set up the input buffers, configure the tuner, and
+// collect the results. Used for matrix-matrix routines.
+template <typename T>
+void TunerAB(int argc, char* argv[], const Tuner2<T> &tune_function) {
+
+ // Sets the parameters and platform/device for which to tune (command-line options)
+ auto help = std::string{"* Options given/available:\n"};
+ auto args = Arguments<T>{};
+ args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0});
+ args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0});
+ args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle);
+ args.m = GetArgument(argc, argv, help, kArgM, size_t{1024});
+ args.n = GetArgument(argc, argv, help, kArgN, size_t{1024});
+ args.fraction = GetArgument(argc, argv, help, kArgFraction, 2048.0);
+ fprintf(stdout, "%s\n", help.c_str());
+
+ // Creates input buffers with random data
+ auto a_mat = std::vector<T>(args.m * args.n);
+ auto b_mat = std::vector<T>(args.m * args.n);
+ PopulateVector(a_mat);
+ PopulateVector(b_mat);
+
+ // Initializes the tuner for the chosen device
+ cltune::Tuner tuner(args.platform_id, args.device_id);
+
+ // Use full-search to explore all parameter combinations.
+ tuner.UseFullSearch();
+
+ // Configures the tuning parameters (kernel specific)
+ tune_function(args, a_mat, b_mat, tuner);
+
+ // Starts the tuning process
+ tuner.Tune();
+
+ // Prints the results to screen
+ auto time_ms = tuner.PrintToScreen();
+ tuner.PrintFormatted();
+
+ // Also prints the performance of the best-case in terms of GB/s
+ const auto mega_bytes = (2*args.m*args.n*GetBytes(args.precision)) * 1.0e-6;
+ if (time_ms != 0.0) {
+ printf("[ -------> ] %.1lf ms or %.1lf GB/s\n", time_ms, mega_bytes/time_ms);
+ }
+}
+
+// Compiles the above function
+template void TunerAB<float>(int, char**, const Tuner2<float>&);
+template void TunerAB<double>(int, char**, const Tuner2<double>&);
+template void TunerAB<float2>(int, char**, const Tuner2<float2>&);
+template void TunerAB<double2>(int, char**, const Tuner2<double2>&);
+
+// =================================================================================================
+
+// Function to get the command-line arguments, set up the input buffers, configure the tuner, and
+// collect the results. Used for matrix-matrix-matrix routines.
+template <typename T>
+void TunerABC(int argc, char* argv[], const Tuner3<T> &tune_function) {
+
+ // Sets the parameters and platform/device for which to tune (command-line options)
+ auto help = std::string{"* Options given/available:\n"};
+ auto args = Arguments<T>{};
+ args.platform_id = GetArgument(argc, argv, help, kArgPlatform, size_t{0});
+ args.device_id = GetArgument(argc, argv, help, kArgDevice, size_t{0});
+ args.precision = GetArgument(argc, argv, help, kArgPrecision, Precision::kSingle);
+ args.m = GetArgument(argc, argv, help, kArgM, size_t{1024});
+ args.n = GetArgument(argc, argv, help, kArgN, size_t{1024});
+ args.k = GetArgument(argc, argv, help, kArgK, size_t{1024});
+ args.alpha = GetArgument(argc, argv, help, kArgAlpha, GetScalar<T>());
+ args.beta = GetArgument(argc, argv, help, kArgBeta, GetScalar<T>());
+ args.fraction = GetArgument(argc, argv, help, kArgFraction, 2048.0);
+ fprintf(stdout, "%s\n", help.c_str());
+
+ // Creates input buffers with random data
+ auto a_mat = std::vector<T>(args.m * args.k);
+ auto b_mat = std::vector<T>(args.n * args.k);
+ auto c_mat = std::vector<T>(args.m * args.n);
+ PopulateVector(a_mat);
+ PopulateVector(b_mat);
+ PopulateVector(c_mat);
+
+ // Initializes the tuner for the chosen device
+ cltune::Tuner tuner(args.platform_id, args.device_id);
+
+ // Use random-search to explore only a part of the parameter space. The fraction of the search-
+ // space to explore is set as a command-line argument.
+ tuner.UseRandomSearch(1.0/args.fraction);
+
+ // Configures the tuning parameters (kernel specific)
+ tune_function(args, a_mat, b_mat, c_mat, tuner);
+
+ // Starts the tuning process
+ tuner.Tune();
+
+ // Prints the results to screen
+ auto time_ms = tuner.PrintToScreen();
+ tuner.PrintFormatted();
+
+ // Also prints the performance of the best-case in terms of GFLOPS
+ const auto mega_flops = (2*args.m*args.n*args.k) * 1.0e-6;
+ if (time_ms != 0.0) {
+ printf("[ -------> ] %.1lf ms or %.1lf GFLOPS\n", time_ms, mega_flops/time_ms);
+ }
+}
+
+// Compiles the above function
+template void TunerABC<float>(int, char**, const Tuner3<float>&);
+template void TunerABC<double>(int, char**, const Tuner3<double>&);
+template void TunerABC<float2>(int, char**, const Tuner3<float2>&);
+template void TunerABC<double2>(int, char**, const Tuner3<double2>&);
+
+// =================================================================================================
+} // namespace clblast
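The performance figures printed by TunerXY, TunerAB and TunerABC follow from simple unit
conversions: bytes (or flops) scaled by 1.0e-6 and divided by milliseconds yield GB/s (or GFLOPS).
A worked example with the default problem sizes and a hypothetical best-case kernel time:

#include <cstdio>

// Worked example of the GB/s and GFLOPS computations in the tuner drivers above.
int main() {
  const double n = 4096.0 * 1024.0;                       // default vector size of TunerXY
  const double mega_bytes = (3.0 * n * 4.0) * 1.0e-6;     // read x, read y, write y (single precision)
  const double m = 1024.0, nn = 1024.0, k = 1024.0;       // default matrix sizes of TunerABC
  const double mega_flops = (2.0 * m * nn * k) * 1.0e-6;  // multiply-add count of a GEMM
  const double time_ms = 1.0;                             // hypothetical best-case kernel time
  std::printf("Xaxpy: %.1lf GB/s, Xgemm: %.1lf GFLOPS\n",
              mega_bytes / time_ms, mega_flops / time_ms);  // ~50.3 GB/s and ~2147.5 GFLOPS
  return 0;
}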
diff --git a/src/tuning/xaxpy.cc b/src/tuning/xaxpy.cc
new file mode 100644
index 00000000..0439ed05
--- /dev/null
+++ b/src/tuning/xaxpy.cc
@@ -0,0 +1,88 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements an auto-tuner to tune the Xaxpy OpenCL kernel. It uses the CLTune library.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+#include <stdexcept>
+
+#include "internal/utilities.h"
+#include "internal/tuning.h"
+
+namespace clblast {
+// =================================================================================================
+
+// The Xaxpy auto-tuner
+template <typename T>
+void XaxpyTune(const Arguments<T> &args,
+ const std::vector<T> &x_vec, std::vector<T> &y_vec,
+ cltune::Tuner &tuner) {
+
+ // The XaxpyFast kernel only works under certain conditions. Check here whether the condition is
+ // true for the reference kernel
+ if (!IsMultiple(args.n, 64)) {
+ throw std::runtime_error("The 'XaxpyFast' kernel requires 'n' to be a multiple of WGS*WPT*VW");
+ }
+
+ // This points to the XaxpyFast kernel as found in the CLBlast library
+ std::string common_source =
+ #include "../src/kernels/common.opencl"
+ std::string kernel_source =
+ #include "../src/kernels/xaxpy.opencl"
+ auto sources = common_source + kernel_source;
+ auto id = tuner.AddKernelFromString(sources, "XaxpyFast", {args.n}, {1});
+ tuner.SetReferenceFromString(sources, "XaxpyFast", {args.n}, {64});
+
+ // Sets the tunable parameters and their possible values
+ tuner.AddParameter(id, "WGS", {64, 128, 256, 512, 1024, 2048});
+ tuner.AddParameter(id, "WPT", {1, 2, 4, 8});
+ tuner.AddParameter(id, "VW", {1, 2, 4, 8});
+
+ // Tests for a specific precision
+ tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
+ tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
+
+ // Modifies the thread-sizes (local) based on the parameters
+ tuner.MulLocalSize(id, {"WGS"});
+ tuner.DivGlobalSize(id, {"WPT"});
+ tuner.DivGlobalSize(id, {"VW"});
+
+ // Sets the function's arguments
+ tuner.AddArgumentScalar(static_cast<int>(args.n));
+ tuner.AddArgumentScalar(args.alpha);
+ tuner.AddArgumentInput(x_vec);
+ tuner.AddArgumentOutput(y_vec);
+}
+
+// =================================================================================================
+
+// Main function which calls the common client code with the routine-specific function as argument.
+void TunerXaxpy(int argc, char *argv[]) {
+ switch(GetPrecision(argc, argv)) {
+ case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
+ case Precision::kSingle: TunerXY<float>(argc, argv, XaxpyTune<float>); break;
+ case Precision::kDouble: TunerXY<double>(argc, argv, XaxpyTune<double>); break;
+ case Precision::kComplexSingle: TunerXY<float2>(argc, argv, XaxpyTune<float2>); break;
+ case Precision::kComplexDouble: TunerXY<double2>(argc, argv, XaxpyTune<double2>); break;
+ }
+}
+
+// =================================================================================================
+} // namespace clblast
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+ clblast::TunerXaxpy(argc, argv);
+ return 0;
+}
+
+// =================================================================================================
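The guard at the top of XaxpyTune reflects how the XaxpyFast kernel divides work: each work-group
processes WGS*WPT*VW elements, so n must be a multiple of that product (the tuner itself only
checks a multiple of 64, matching the reference configuration). A sketch with the largest values
in the search space:

#include <cstddef>
#include <cstdio>

// Sketch of the XaxpyFast divisibility requirement; values are illustrative.
int main() {
  const size_t n = 4096 * 1024;                 // default vector size
  const size_t wgs = 2048, wpt = 8, vw = 8;     // largest values in the search space above
  const size_t per_workgroup = wgs * wpt * vw;  // 131072 elements handled per work-group
  std::printf("n %% (WGS*WPT*VW) = %zu\n", n % per_workgroup);  // 0: this combination is valid
  return 0;
}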
diff --git a/src/tuning/xgemm.cc b/src/tuning/xgemm.cc
new file mode 100644
index 00000000..aba56810
--- /dev/null
+++ b/src/tuning/xgemm.cc
@@ -0,0 +1,126 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements an auto-tuner to tune the Xgemm OpenCL kernel. It uses the CLTune library.
+// Note that this tuner uses random-search: running it multiple times or with a larger fraction
+// argument might be necessary to obtain good results.
+//
+// =================================================================================================
+
+#include <string>
+#include <vector>
+#include <stdexcept>
+
+#include "internal/utilities.h"
+#include "internal/tuning.h"
+
+namespace clblast {
+// =================================================================================================
+
+// The Xgemm auto-tuner
+template <typename T>
+void XgemmTune(const Arguments<T> &args,
+ const std::vector<T> &a_mat, const std::vector<T> &b_mat, std::vector<T> &c_mat,
+ cltune::Tuner &tuner) {
+
+ // This points to the Xgemm kernel as found in the CLBlast library and its golden reference
+ std::string common_source =
+ #include "../src/kernels/common.opencl"
+ std::string kernel_source =
+ #include "../src/kernels/xgemm.opencl"
+ auto sources = common_source + kernel_source;
+ auto id = tuner.AddKernelFromString(sources, "Xgemm", {args.m, args.n}, {1, 1});
+ tuner.SetReferenceFromString(sources, "Xgemm", {args.m, args.n}, {8, 8});
+
+ // Sets the tunable parameters and their possible values
+ tuner.AddParameter(id, "MWG", {16, 32, 64, 128});
+ tuner.AddParameter(id, "NWG", {16, 32, 64, 128});
+ tuner.AddParameter(id, "KWG", {16, 32});
+ tuner.AddParameter(id, "MDIMC", {8, 16, 32});
+ tuner.AddParameter(id, "NDIMC", {8, 16, 32});
+ tuner.AddParameter(id, "MDIMA", {8, 16, 32});
+ tuner.AddParameter(id, "NDIMB", {8, 16, 32});
+ tuner.AddParameter(id, "KWI", {2, 8});
+ tuner.AddParameter(id, "VWM", {1, 2, 4, 8});
+ tuner.AddParameter(id, "VWN", {1, 2, 4, 8});
+ tuner.AddParameter(id, "STRM", {0, 1});
+ tuner.AddParameter(id, "STRN", {0, 1});
+ tuner.AddParameter(id, "SA", {0, 1});
+ tuner.AddParameter(id, "SB", {0, 1});
+
+ // Tests for a specific precision
+ tuner.AddParameter(id, "PRECISION", {static_cast<size_t>(args.precision)});
+ tuner.AddParameterReference("PRECISION", static_cast<size_t>(args.precision));
+
+ // Sets the helper functions to implement the constraints below
+ auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
+ auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
+ auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
+
+ // Sets constraints: Requirement for unrolling the KWG loop
+ tuner.AddConstraint(id, MultipleOfX, {"KWG", "KWI"});
+
+ // Sets constraints: Required for integer MWI and NWI
+ tuner.AddConstraint(id, MultipleOfXMulY, {"MWG", "MDIMC", "VWM"});
+ tuner.AddConstraint(id, MultipleOfXMulY, {"NWG", "NDIMC", "VWN"});
+
+ // Sets constraints: Required for integer MWIA and NWIB
+ tuner.AddConstraint(id, MultipleOfXMulY, {"MWG", "MDIMA", "VWM"});
+ tuner.AddConstraint(id, MultipleOfXMulY, {"NWG", "NDIMB", "VWN"});
+
+ // Sets constraints: KWG has to be a multiple of KDIMA = ((MDIMC*NDIMC)/(MDIMA)) and KDIMB = (...)
+ tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "MDIMA"});
+ tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"KWG", "MDIMC", "NDIMC", "NDIMB"});
+
+ // Sets the constraints for local memory size limitations
+ auto LocalMemorySize = [args] (std::vector<size_t> v) {
+ return (((v[0]*v[1]*v[2]/v[3]) + (v[4]*v[5]*v[6]/v[7]))*GetBytes(args.precision));
+ };
+ tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"SA", "KWG", "MWG", "VWM",
+ "SB", "KWG", "NWG", "VWN"});
+
+ // Modifies the thread-sizes (both global and local) based on the parameters
+ tuner.MulLocalSize(id, {"MDIMC", "NDIMC"});
+ tuner.MulGlobalSize(id, {"MDIMC", "NDIMC"});
+ tuner.DivGlobalSize(id, {"MWG", "NWG"});
+
+ // Sets the function's arguments
+ tuner.AddArgumentScalar(static_cast<int>(args.m));
+ tuner.AddArgumentScalar(static_cast<int>(args.n));
+ tuner.AddArgumentScalar(static_cast<int>(args.k));
+ tuner.AddArgumentScalar(args.alpha);
+ tuner.AddArgumentScalar(args.beta);
+ tuner.AddArgumentInput(a_mat);
+ tuner.AddArgumentInput(b_mat);
+ tuner.AddArgumentOutput(c_mat);
+}
+
+// =================================================================================================
+
+// Main function which calls the common client code with the routine-specific function as argument.
+void TunerXgemm(int argc, char *argv[]) {
+ switch(GetPrecision(argc, argv)) {
+ case Precision::kHalf: throw std::runtime_error("Unsupported precision mode");
+ case Precision::kSingle: TunerABC<float>(argc, argv, XgemmTune<float>); break;
+ case Precision::kDouble: TunerABC<double>(argc, argv, XgemmTune<double>); break;
+ case Precision::kComplexSingle: TunerABC<float2>(argc, argv, XgemmTune<float2>); break;
+ case Precision::kComplexDouble: TunerABC<double2>(argc, argv, XgemmTune<double2>); break;
+ }
+}
+
+// =================================================================================================
+} // namespace clblast
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+ clblast::TunerXgemm(argc, argv);
+ return 0;
+}
+
+// =================================================================================================
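The Xgemm constraints above can be checked by hand for any candidate configuration. For instance,
KWG has to be a multiple of both KDIMA = (MDIMC*NDIMC)/MDIMA and KDIMB = (MDIMC*NDIMC)/NDIMB. A
worked example with illustrative values from the search space:

#include <cstddef>
#include <cstdio>

// Worked example of the KWG divisibility constraints set in XgemmTune.
int main() {
  const size_t mdimc = 16, ndimc = 16, mdima = 32, ndimb = 8;  // one candidate configuration
  const size_t kdima = (mdimc * ndimc) / mdima;                // 8
  const size_t kdimb = (mdimc * ndimc) / ndimb;                // 32
  const size_t kwg = 32;
  std::printf("KWG %% KDIMA = %zu, KWG %% KDIMB = %zu\n",
              kwg % kdima, kwg % kdimb);                       // both 0: the combination passes
  return 0;
}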