summary | refs | log | tree | commit | diff
path: root/src/tuning/kernels/xgemm_direct.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/tuning/kernels/xgemm_direct.cpp')
-rw-r--r-- src/tuning/kernels/xgemm_direct.cpp | 96
1 file changed, 38 insertions(+), 58 deletions(-)
diff --git a/src/tuning/kernels/xgemm_direct.cpp b/src/tuning/kernels/xgemm_direct.cpp
index 619fb37a..60a983b4 100644
--- a/src/tuning/kernels/xgemm_direct.cpp
+++ b/src/tuning/kernels/xgemm_direct.cpp
@@ -7,7 +7,7 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
-// This file uses the CLTune auto-tuner to tune the direct xgemm kernels. There are two variations:
+// This file uses the auto-tuner to tune the direct xgemm kernels. There are two variations:
// - V==1: This tests some limited set of tuning parameters exhaustively.
// - V==2: This tests a much larger set of tuning parameters by randomly sampling a subset.
//
@@ -36,9 +36,8 @@ class TuneXgemmDirect {
settings.default_m = 256;
settings.default_n = 256;
settings.default_k = 256;
- settings.default_fraction = (V==1) ? 1.0 : 32.0; // test all or sample randomly
+ settings.default_fraction = (V==1) ? 1.0 : 64.0; // test all or sample randomly
settings.default_num_runs = 4;
- settings.default_heuristic = static_cast<size_t>(cltune::SearchMethod::RandomSearch);
return settings;
}
@@ -50,7 +49,6 @@ class TuneXgemmDirect {
settings.kernel_family = (V==1) ? "xgemm_direct_1" : "xgemm_direct_2";
settings.kernel_name = "XgemmDirectTN";
settings.sources =
-#include "../src/kernels/common.opencl"
#include "../src/kernels/level3/xgemm_direct_part1.opencl"
#include "../src/kernels/level3/xgemm_direct_part2.opencl"
#include "../src/kernels/level3/xgemm_direct_part3.opencl"
@@ -61,6 +59,10 @@ class TuneXgemmDirect {
settings.size_b = args.n * args.k;
settings.size_c = args.m * args.n;
+ // Inputs and outputs IDs (X:0, Y:1, A:2, B:3, C:4, temp:5)
+ settings.inputs = {2, 3, 4};
+ settings.outputs = {4};
+
// Sets the base thread configuration
settings.global_size = {args.m, args.n};
settings.global_size_ref = settings.global_size;
@@ -89,7 +91,7 @@ class TuneXgemmDirect {
}
else { // a lot more tuning parameters - has to be sampled randomly, too much to test all
settings.parameters = {
- {"WGD", {8, 16, 32, 64, 128}},
+ {"WGD", {8, 16, 32, 64}},
{"MDIMCD", {8, 16, 32}},
{"NDIMCD", {8, 16, 32}},
{"MDIMAD", {8, 16, 32}},
@@ -106,79 +108,57 @@ class TuneXgemmDirect {
settings.metric_amount = 2 * args.m * args.n * args.k;
settings.performance_unit = "GFLOPS";
- // Returns which search heuristic to use
- if (V==1) { settings.heuristic = static_cast<size_t>(cltune::SearchMethod::FullSearch); }
- else {
- // Use full-search to explore all parameter combinations or another strategy to search only a
- // part of the parameter values. The fraction is set as a command-line argument.
- if (args.fraction == 1.0 || args.fraction == 0.0) {
- settings.heuristic = static_cast<size_t>(cltune::SearchMethod::FullSearch);
- } else {
- settings.heuristic = args.heuristic_selection;
- }
- }
-
return settings;
}
// Tests for valid arguments
static void TestValidArguments(const Arguments<T> &) { }
-
- // Sets the constraints
- static void SetConstraints(cltune::Tuner &tuner, const size_t id) {
+ static std::vector<Constraint> SetConstraints() {
+ auto constraints = std::vector<Constraint>();
auto MultipleOfX = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]); };
auto MultipleOfXMulY = [] (std::vector<size_t> v) { return IsMultiple(v[0], v[1]*v[2]); };
auto MultipleOfXMulYDivZ = [] (std::vector<size_t> v) { return IsMultiple(v[0], (v[1]*v[2])/v[3]); };
// Requirement for unrolling the WGD loop
- tuner.AddConstraint(id, MultipleOfX, {"WGD", "KWID"});
+ constraints.push_back({MultipleOfX, {"WGD", "KWID"}});
// Required for integer MWID and NWID
- tuner.AddConstraint(id, MultipleOfXMulY, {"WGD", "MDIMCD", "VWMD"});
- tuner.AddConstraint(id, MultipleOfXMulY, {"WGD", "NDIMCD", "VWND"});
+ constraints.push_back({MultipleOfXMulY, {"WGD", "MDIMCD", "VWMD"}});
+ constraints.push_back({MultipleOfXMulY, {"WGD", "NDIMCD", "VWND"}});
// Required for integer MWIAD and NWIBD
- tuner.AddConstraint(id, MultipleOfXMulY, {"WGD", "MDIMAD", "VWMD"});
- tuner.AddConstraint(id, MultipleOfXMulY, {"WGD", "NDIMBD", "VWND"});
+ constraints.push_back({MultipleOfXMulY, {"WGD", "MDIMAD", "VWMD"}});
+ constraints.push_back({MultipleOfXMulY, {"WGD", "NDIMBD", "VWND"}});
// WGD has to be a multiple of KDIMAD = ((MDIMCD*NDIMCD)/(MDIMAD)) and KDIMBD = (...)
- tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "MDIMAD"});
- tuner.AddConstraint(id, MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "NDIMBD"});
+ constraints.push_back({MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "MDIMAD"}});
+ constraints.push_back({MultipleOfXMulYDivZ, {"WGD", "MDIMCD", "NDIMCD", "NDIMBD"}});
// Extra constraints for variation 1 to limit the set of options significantly
if (V==1) {
auto IsEqual = [] (std::vector<size_t> v) { return v[0] == v[1]; };
- tuner.AddConstraint(id, IsEqual, {"MDIMCD", "MDIMAD"});
- tuner.AddConstraint(id, IsEqual, {"NDIMCD", "NDIMBD"});
+ constraints.push_back({IsEqual, {"MDIMCD", "MDIMAD"}});
+ constraints.push_back({IsEqual, {"NDIMCD", "NDIMBD"}});
}
- }
-
- // Sets the local memory size
- static void SetLocalMemorySize(cltune::Tuner &tuner, const size_t id, const Arguments<T> &args) {
- auto LocalMemorySize = [args] (std::vector<size_t> v) {
- return ((v[0]*(v[0] + v[1]) + v[0]*(v[0] + v[2]))*GetBytes(args.precision));
- };
- tuner.SetLocalMemoryUsage(id, LocalMemorySize, {"WGD", "PADA", "PADB"});
+ return constraints;
}
// Sets the kernel's arguments
- static void SetArguments(cltune::Tuner &tuner, const Arguments<T> &args,
- std::vector<T> &, std::vector<T> &,
- std::vector<T> &a_mat, std::vector<T> &b_mat, std::vector<T> &c_mat,
- std::vector<T> &) {
- tuner.AddArgumentScalar(static_cast<int>(args.m));
- tuner.AddArgumentScalar(static_cast<int>(args.n));
- tuner.AddArgumentScalar(static_cast<int>(args.k));
- tuner.AddArgumentScalar(GetRealArg(args.alpha));
- tuner.AddArgumentScalar(GetRealArg(args.beta));
- tuner.AddArgumentInput(a_mat);
- tuner.AddArgumentScalar(0); // a_offset
- tuner.AddArgumentScalar(static_cast<int>(args.k)); // a_ld
- tuner.AddArgumentInput(b_mat);
- tuner.AddArgumentScalar(0); // b_offset
- tuner.AddArgumentScalar(static_cast<int>(args.n)); // b_ld
- tuner.AddArgumentOutput(c_mat);
- tuner.AddArgumentScalar(0); // c_offset
- tuner.AddArgumentScalar(static_cast<int>(args.n)); // c_ld
- tuner.AddArgumentScalar(1); // c_do_transpose
- tuner.AddArgumentScalar(0); // a_conjugate
- tuner.AddArgumentScalar(0); // b_conjugate
+ static void SetArguments(Kernel &kernel, const Arguments<T> &args,
+ std::vector<Buffer<T>>& buffers) {
+ kernel.SetArgument(0, static_cast<int>(args.m));
+ kernel.SetArgument(1, static_cast<int>(args.n));
+ kernel.SetArgument(2, static_cast<int>(args.k));
+ kernel.SetArgument(3, GetRealArg(args.alpha));
+ kernel.SetArgument(4, GetRealArg(args.beta));
+ kernel.SetArgument(5, buffers[2]()); // 2 == A matrix
+ kernel.SetArgument(6, 0); // a_offset
+ kernel.SetArgument(7, static_cast<int>(args.k)); // a_ld
+ kernel.SetArgument(8, buffers[3]()); // 3 == B matrix
+ kernel.SetArgument(9, 0); // b_offset
+ kernel.SetArgument(10, static_cast<int>(args.n)); // b_ld
+ kernel.SetArgument(11, buffers[4]()); // 4 == C matrix
+ kernel.SetArgument(12, 0); // c_offset
+ kernel.SetArgument(13, static_cast<int>(args.n)); // c_ld
+ kernel.SetArgument(14, 1); // c_do_transpose
+ kernel.SetArgument(15, 0); // a_conjugate
+ kernel.SetArgument(16, 0); // b_conjugate
}
};