summaryrefslogtreecommitdiff
path: root/src/routines
diff options
context:
space:
mode:
authorCedric Nugteren <web@cedricnugteren.nl>2018-02-02 21:18:37 +0100
committerCedric Nugteren <web@cedricnugteren.nl>2018-02-02 21:18:37 +0100
commit69ed46c8da69ee18338eca5102ead43410cc01b5 (patch)
tree575c4eef2a3210117a574f25f81662c503ec207d /src/routines
parentae66782eabc574a507b8cfe2b83f2df23b1a36c1 (diff)
Implemented the XHAD Hadamard product routine
Diffstat (limited to 'src/routines')
-rw-r--r--src/routines/levelx/xhad.cpp58
1 file changed, 57 insertions, 1 deletion
diff --git a/src/routines/levelx/xhad.cpp b/src/routines/levelx/xhad.cpp
index 46ae8031..da416cc7 100644
--- a/src/routines/levelx/xhad.cpp
+++ b/src/routines/levelx/xhad.cpp
@@ -24,7 +24,7 @@ template <typename T>
Xhad<T>::Xhad(Queue &queue, EventPointer event, const std::string &name):
Routine(queue, event, name, {"Xaxpy"}, PrecisionValue<T>(), {}, {
#include "../../kernels/level1/level1.opencl"
-#include "../../kernels/level1/xaxpy.opencl"
+#include "../../kernels/level1/xhad.opencl"
}) {
}
@@ -45,6 +45,62 @@ void Xhad<T>::DoHad(const size_t n, const T alpha,
TestVectorY(n, y_buffer, y_offset, y_inc);
TestVectorY(n, z_buffer, z_offset, z_inc); // TODO: Make a TestVectorZ function with error codes
+ // Determines whether or not the fast-version can be used
+ const auto use_faster_kernel = (x_offset == 0) && (x_inc == 1) &&
+ (y_offset == 0) && (y_inc == 1) &&
+ (z_offset == 0) && (z_inc == 1) &&
+ IsMultiple(n, db_["WPT"]*db_["VW"]);
+ const auto use_fastest_kernel = use_faster_kernel &&
+ IsMultiple(n, db_["WGS"]*db_["WPT"]*db_["VW"]);
+
+ // If possible, run the fast-version of the kernel
+ const auto kernel_name = (use_fastest_kernel) ? "XhadFastest" :
+ (use_faster_kernel) ? "XhadFaster" : "Xhad";
+
+ // Retrieves the Xhad kernel from the compiled binary
+ auto kernel = Kernel(program_, kernel_name);
+
+ // Sets the kernel arguments
+ if (use_faster_kernel || use_fastest_kernel) {
+ kernel.SetArgument(0, static_cast<int>(n));
+ kernel.SetArgument(1, GetRealArg(alpha));
+ kernel.SetArgument(2, GetRealArg(beta));
+ kernel.SetArgument(3, x_buffer());
+ kernel.SetArgument(4, y_buffer());
+ kernel.SetArgument(5, z_buffer());
+ }
+ else {
+ kernel.SetArgument(0, static_cast<int>(n));
+ kernel.SetArgument(1, GetRealArg(alpha));
+ kernel.SetArgument(2, GetRealArg(beta));
+ kernel.SetArgument(3, x_buffer());
+ kernel.SetArgument(4, static_cast<int>(x_offset));
+ kernel.SetArgument(5, static_cast<int>(x_inc));
+ kernel.SetArgument(6, y_buffer());
+ kernel.SetArgument(7, static_cast<int>(y_offset));
+ kernel.SetArgument(8, static_cast<int>(y_inc));
+ kernel.SetArgument(9, z_buffer());
+ kernel.SetArgument(10, static_cast<int>(z_offset));
+ kernel.SetArgument(11, static_cast<int>(z_inc));
+ }
+
+ // Launches the kernel
+ if (use_fastest_kernel) {
+ auto global = std::vector<size_t>{CeilDiv(n, db_["WPT"]*db_["VW"])};
+ auto local = std::vector<size_t>{db_["WGS"]};
+ RunKernel(kernel, queue_, device_, global, local, event_);
+ }
+ else if (use_faster_kernel) {
+ auto global = std::vector<size_t>{Ceil(CeilDiv(n, db_["WPT"]*db_["VW"]), db_["WGS"])};
+ auto local = std::vector<size_t>{db_["WGS"]};
+ RunKernel(kernel, queue_, device_, global, local, event_);
+ }
+ else {
+ const auto n_ceiled = Ceil(n, db_["WGS"]*db_["WPT"]);
+ auto global = std::vector<size_t>{n_ceiled/db_["WPT"]};
+ auto local = std::vector<size_t>{db_["WGS"]};
+ RunKernel(kernel, queue_, device_, global, local, event_);
+ }
}
// =================================================================================================