Diffstat (limited to 'src/routines/level3/xgemm.cpp')
-rw-r--r--  src/routines/level3/xgemm.cpp  308
1 file changed, 140 insertions(+), 168 deletions(-)
diff --git a/src/routines/level3/xgemm.cpp b/src/routines/level3/xgemm.cpp
index 1602c69f..4f70dc7a 100644
--- a/src/routines/level3/xgemm.cpp
+++ b/src/routines/level3/xgemm.cpp
@@ -24,8 +24,7 @@ template <typename T>
Xgemm<T>::Xgemm(Queue &queue, EventPointer event, const std::string &name):
Routine(queue, event, name,
{"Copy","Pad","Transpose","Padtranspose","Xgemm","XgemmDirect","KernelSelection"},
- PrecisionValue<T>()) {
- source_string_ =
+ PrecisionValue<T>(), {}, {
#include "../../kernels/level3/level3.opencl"
#include "../../kernels/level3/copy_fast.opencl"
#include "../../kernels/level3/copy_pad.opencl"
@@ -37,30 +36,28 @@ Xgemm<T>::Xgemm(Queue &queue, EventPointer event, const std::string &name):
#include "../../kernels/level3/xgemm_direct_part1.opencl"
#include "../../kernels/level3/xgemm_direct_part2.opencl"
#include "../../kernels/level3/xgemm_direct_part3.opencl"
- ;
- auto source_string_part_2 = // separated in two parts to prevent C1091 in MSVC 2013
+ , // separated in two parts to prevent C1091 in MSVC 2013
#include "../../kernels/level3/xgemm_part1.opencl"
#include "../../kernels/level3/xgemm_part2.opencl"
#include "../../kernels/level3/xgemm_part3.opencl"
- ;
- source_string_ += source_string_part_2;
+ }) {
}
// =================================================================================================
// The main routine
template <typename T>
-StatusCode Xgemm<T>::DoGemm(const Layout layout,
- const Transpose a_transpose, const Transpose b_transpose,
- const size_t m, const size_t n, const size_t k,
- const T alpha,
- const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
- const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
- const T beta,
- const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld) {
+void Xgemm<T>::DoGemm(const Layout layout,
+ const Transpose a_transpose, const Transpose b_transpose,
+ const size_t m, const size_t n, const size_t k,
+ const T alpha,
+ const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
+ const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
+ const T beta,
+ const Buffer<T> &c_buffer, const size_t c_offset, const size_t c_ld) {
// Makes sure all dimensions are larger than zero
- if ((m == 0) || (n == 0) || (k == 0)) { return StatusCode::kInvalidDimension; }
+ if ((m == 0) || (n == 0) || (k == 0)) { throw BLASError(StatusCode::kInvalidDimension); }
// Computes whether or not the matrices are transposed in memory. This is based on their layout
// (row or column-major) and whether or not they are requested to be pre-transposed. Note
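
The hunk above is the heart of this change: DoGemm now returns void and reports problems by throwing a BLASError instead of returning a StatusCode. As a rough, standalone sketch of that pattern (the class, wrapper and enum values below are illustrative stand-ins, not CLBlast's actual definitions), the status-code interface can be recovered at an API boundary by catching the exception:

#include <cstddef>
#include <cstdio>
#include <stdexcept>

// Illustrative status codes only; the real values live in CLBlast's public headers.
enum class StatusCode { kSuccess = 0, kInvalidDimension = -11, kUnknownError = -2048 };

// Stand-in for the BLASError thrown above: it carries the status code that the
// old code used to return directly.
class BLASError : public std::runtime_error {
 public:
  explicit BLASError(const StatusCode status)
      : std::runtime_error("BLAS error"), status_(status) {}
  StatusCode status() const { return status_; }
 private:
  StatusCode status_;
};

// A routine in the new style: void return, exceptions for error reporting.
void DoGemmLike(const std::size_t m, const std::size_t n, const std::size_t k) {
  if ((m == 0) || (n == 0) || (k == 0)) { throw BLASError(StatusCode::kInvalidDimension); }
  // ... the actual GEMM work would go here ...
}

// A hypothetical boundary wrapper that restores the old status-code interface.
StatusCode GemmStatusWrapper(const std::size_t m, const std::size_t n, const std::size_t k) {
  try {
    DoGemmLike(m, n, k);
    return StatusCode::kSuccess;
  } catch (const BLASError &e) {
    return e.status();
  } catch (...) {
    return StatusCode::kUnknownError;
  }
}

int main() {
  std::printf("m=0: status %d\n", static_cast<int>(GemmStatusWrapper(0, 4, 4)));  // invalid dimension
  std::printf("m=4: status %d\n", static_cast<int>(GemmStatusWrapper(4, 4, 4)));  // success
}
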
@@ -99,12 +96,9 @@ StatusCode Xgemm<T>::DoGemm(const Layout layout,
// matrix A cannot be less than K when rotated, or less than M when not-rotated
// matrix B cannot be less than N when rotated, or less than K when not-rotated
// matrix C cannot be less than N when rotated, or less than M when not-rotated
- auto status = TestMatrixA(a_one, a_two, a_buffer, a_offset, a_ld);
- if (ErrorIn(status)) { return status; }
- status = TestMatrixB(b_one, b_two, b_buffer, b_offset, b_ld);
- if (ErrorIn(status)) { return status; }
- status = TestMatrixC(c_one, c_two, c_buffer, c_offset, c_ld);
- if (ErrorIn(status)) { return status; }
+ TestMatrixA(a_one, a_two, a_buffer, a_offset, a_ld);
+ TestMatrixB(b_one, b_two, b_buffer, b_offset, b_ld);
+ TestMatrixC(c_one, c_two, c_buffer, c_offset, c_ld);
// Selects which version of GEMM to run
const auto do_gemm_direct = (m * n * k < db_["XGEMM_MIN_INDIRECT_SIZE"]);
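
The last line of the hunk above picks between the two GEMM code paths with a single size threshold on m * n * k. A minimal sketch of that heuristic, using a made-up threshold (the real XGEMM_MIN_INDIRECT_SIZE is a per-device tuning parameter read from the database db_):

#include <cstddef>
#include <cstdio>

// Hypothetical threshold; CLBlast tunes this value per device.
constexpr std::size_t kXgemmMinIndirectSize = 1024 * 1024;

bool UseDirectGemm(const std::size_t m, const std::size_t n, const std::size_t k) {
  // Small problems run the single direct kernel and skip the pad/copy/transpose
  // pre-processing; larger problems amortise that overhead with the indirect kernel.
  return (m * n * k) < kXgemmMinIndirectSize;
}

int main() {
  std::printf("64^3   -> %s\n", UseDirectGemm(64, 64, 64) ? "direct" : "indirect");
  std::printf("1024^3 -> %s\n", UseDirectGemm(1024, 1024, 1024) ? "direct" : "indirect");
}
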
@@ -131,7 +125,7 @@ StatusCode Xgemm<T>::DoGemm(const Layout layout,
// requirements, but several pre and post-processing kernels take care of those. However, the
// overhead of these extra kernels might not be ideal for certain devices/arguments.
template <typename T>
-StatusCode Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k,
+void Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k,
const T alpha,
const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
@@ -142,8 +136,6 @@ StatusCode Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k
const size_t a_one, const size_t a_two, const bool a_want_rotated,
const size_t b_one, const size_t b_two, const bool b_want_rotated,
const size_t c_one, const size_t c_two, const bool c_want_rotated) {
- auto status = StatusCode::kSuccess;
-
// Calculates the ceiled versions of m, n, and k
const auto m_ceiled = Ceil(m, db_["MWG"]);
const auto n_ceiled = Ceil(n, db_["NWG"]);
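
The Ceil calls above round m, n and k up to multiples of the workgroup tile sizes MWG, NWG (and KWG for k) so the padded kernel never reads or writes out of bounds. A minimal equivalent of that helper, assuming it rounds up to the next multiple, which is how the ceiled sizes are used further down:

#include <cstddef>
#include <cstdio>

// Assumed behaviour of Ceil(x, y): round x up to the next multiple of y.
std::size_t CeilToMultiple(const std::size_t x, const std::size_t y) {
  return ((x + y - 1) / y) * y;
}

int main() {
  // Worked example with hypothetical tuning parameters MWG = 64, NWG = 64, KWG = 32:
  std::printf("m = 1000 -> m_ceiled = %zu\n", CeilToMultiple(1000, 64));  // 1024
  std::printf("n =  500 -> n_ceiled = %zu\n", CeilToMultiple(500, 64));   // 512
  std::printf("k =  300 -> k_ceiled = %zu\n", CeilToMultiple(300, 32));   // 320
}
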
@@ -158,109 +150,95 @@ StatusCode Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k
const auto c_one_i = (c_want_rotated) ? n_ceiled : m_ceiled;
const auto c_two_i = (c_want_rotated) ? m_ceiled : n_ceiled;
- // The padded/transposed input/output matrices: if memory allocation fails, throw an exception
- try {
-
- // Loads the program from the database
- const auto program = GetProgramFromCache(context_, PrecisionValue<T>(), routine_name_);
-
- // Determines whether or not temporary matrices are needed
- auto a_no_temp = a_one == a_one_i && a_two == a_two_i && a_ld == a_one && a_offset == 0 &&
- a_do_transpose == false && a_conjugate == false;
- auto b_no_temp = b_one == b_one_i && b_two == b_two_i && b_ld == b_one && b_offset == 0 &&
- b_do_transpose == false && b_conjugate == false;
- auto c_no_temp = c_one == c_one_i && c_two == c_two_i && c_ld == c_one && c_offset == 0 &&
- c_do_transpose == false;
-
- // Creates the temporary matrices
- const auto a_temp = (a_no_temp) ? a_buffer : Buffer<T>(context_, a_one_i*a_two_i);
- const auto b_temp = (b_no_temp) ? b_buffer : Buffer<T>(context_, b_one_i*b_two_i);
- const auto c_temp = (c_no_temp) ? c_buffer : Buffer<T>(context_, c_one_i*c_two_i);
-
- // Events of all kernels (including pre/post processing kernels)
- auto eventWaitList = std::vector<Event>();
- auto emptyEventList = std::vector<Event>();
-
- // Runs the pre-processing kernel for matrix A. This transposes the matrix, but also pads zeros
- // to fill it up until it reaches a certain multiple of size (kernel parameter dependent). In
- // case nothing has to be done, these kernels can be skipped.
- if (!a_no_temp) {
- auto eventProcessA = Event();
- status = PadCopyTransposeMatrix(queue_, device_, db_, eventProcessA.pointer(), emptyEventList,
- a_one, a_two, a_ld, a_offset, a_buffer,
- a_one_i, a_two_i, a_one_i, 0, a_temp,
- ConstantOne<T>(), program,
- true, a_do_transpose, a_conjugate);
- if (ErrorIn(status)) { return status; }
- eventWaitList.push_back(eventProcessA);
- }
-
- // As above, but now for matrix B
- if (!b_no_temp) {
- auto eventProcessB = Event();
- status = PadCopyTransposeMatrix(queue_, device_, db_, eventProcessB.pointer(), emptyEventList,
- b_one, b_two, b_ld, b_offset, b_buffer,
- b_one_i, b_two_i, b_one_i, 0, b_temp,
- ConstantOne<T>(), program,
- true, b_do_transpose, b_conjugate);
- if (ErrorIn(status)) { return status; }
- eventWaitList.push_back(eventProcessB);
- }
-
- // As above, but now for matrix C. This is only necessary if C is used both as input and output.
- if (!c_no_temp && beta != static_cast<T>(0)) {
- auto eventProcessC = Event();
- status = PadCopyTransposeMatrix(queue_, device_, db_, eventProcessC.pointer(), emptyEventList,
- c_one, c_two, c_ld, c_offset, c_buffer,
- c_one_i, c_two_i, c_one_i, 0, c_temp,
- ConstantOne<T>(), program,
- true, c_do_transpose, false);
- if (ErrorIn(status)) { return status; }
- eventWaitList.push_back(eventProcessC);
- }
-
- // Retrieves the Xgemm kernel from the compiled binary
- try {
- auto kernel = Kernel(program, "Xgemm");
-
- // Sets the kernel arguments
- kernel.SetArgument(0, static_cast<int>(m_ceiled));
- kernel.SetArgument(1, static_cast<int>(n_ceiled));
- kernel.SetArgument(2, static_cast<int>(k_ceiled));
- kernel.SetArgument(3, GetRealArg(alpha));
- kernel.SetArgument(4, GetRealArg(beta));
- kernel.SetArgument(5, a_temp());
- kernel.SetArgument(6, b_temp());
- kernel.SetArgument(7, c_temp());
-
- // Computes the global and local thread sizes
- const auto global = std::vector<size_t>{
- (c_one_i * db_["MDIMC"]) / db_["MWG"],
- (c_two_i * db_["NDIMC"]) / db_["NWG"]
- };
- const auto local = std::vector<size_t>{db_["MDIMC"], db_["NDIMC"]};
-
- // Launches the kernel
- auto eventKernel = Event();
- auto eventPointer = (!c_no_temp) ? eventKernel.pointer() : event_;
- status = RunKernel(kernel, queue_, device_, global, local, eventPointer, eventWaitList);
- if (ErrorIn(status)) { return status; }
-
- // Runs the post-processing kernel if needed
- if (!c_no_temp) {
- eventWaitList.push_back(eventKernel);
- status = PadCopyTransposeMatrix(queue_, device_, db_, event_, eventWaitList,
- c_one_i, c_two_i, c_one_i, 0, c_temp,
- c_one, c_two, c_ld, c_offset, c_buffer,
- ConstantOne<T>(), program,
- false, c_do_transpose, false);
- if (ErrorIn(status)) { return status; }
- }
-
- // Successfully finished the computation
- return StatusCode::kSuccess;
- } catch (...) { return StatusCode::kInvalidKernel; }
- } catch (...) { return StatusCode::kTempBufferAllocFailure; }
+ // Loads the program from the database
+ const auto program = GetProgramFromCache(context_, PrecisionValue<T>(), routine_name_);
+
+ // Determines whether or not temporary matrices are needed
+ auto a_no_temp = a_one == a_one_i && a_two == a_two_i && a_ld == a_one && a_offset == 0 &&
+ a_do_transpose == false && a_conjugate == false;
+ auto b_no_temp = b_one == b_one_i && b_two == b_two_i && b_ld == b_one && b_offset == 0 &&
+ b_do_transpose == false && b_conjugate == false;
+ auto c_no_temp = c_one == c_one_i && c_two == c_two_i && c_ld == c_one && c_offset == 0 &&
+ c_do_transpose == false;
+
+ // Creates the temporary matrices
+ const auto a_temp = (a_no_temp) ? a_buffer : Buffer<T>(context_, a_one_i*a_two_i);
+ const auto b_temp = (b_no_temp) ? b_buffer : Buffer<T>(context_, b_one_i*b_two_i);
+ const auto c_temp = (c_no_temp) ? c_buffer : Buffer<T>(context_, c_one_i*c_two_i);
+
+ // Events of all kernels (including pre/post processing kernels)
+ auto eventWaitList = std::vector<Event>();
+ auto emptyEventList = std::vector<Event>();
+
+ // Runs the pre-processing kernel for matrix A. This transposes the matrix, but also pads zeros
+ // to fill it up until it reaches a certain multiple of size (kernel parameter dependent). In
+ // case nothing has to be done, these kernels can be skipped.
+ if (!a_no_temp) {
+ auto eventProcessA = Event();
+ PadCopyTransposeMatrix(queue_, device_, db_, eventProcessA.pointer(), emptyEventList,
+ a_one, a_two, a_ld, a_offset, a_buffer,
+ a_one_i, a_two_i, a_one_i, 0, a_temp,
+ ConstantOne<T>(), program,
+ true, a_do_transpose, a_conjugate);
+ eventWaitList.push_back(eventProcessA);
+ }
+
+ // As above, but now for matrix B
+ if (!b_no_temp) {
+ auto eventProcessB = Event();
+ PadCopyTransposeMatrix(queue_, device_, db_, eventProcessB.pointer(), emptyEventList,
+ b_one, b_two, b_ld, b_offset, b_buffer,
+ b_one_i, b_two_i, b_one_i, 0, b_temp,
+ ConstantOne<T>(), program,
+ true, b_do_transpose, b_conjugate);
+ eventWaitList.push_back(eventProcessB);
+ }
+
+ // As above, but now for matrix C. This is only necessary if C is used both as input and output.
+ if (!c_no_temp && beta != static_cast<T>(0)) {
+ auto eventProcessC = Event();
+ PadCopyTransposeMatrix(queue_, device_, db_, eventProcessC.pointer(), emptyEventList,
+ c_one, c_two, c_ld, c_offset, c_buffer,
+ c_one_i, c_two_i, c_one_i, 0, c_temp,
+ ConstantOne<T>(), program,
+ true, c_do_transpose, false);
+ eventWaitList.push_back(eventProcessC);
+ }
+
+ // Retrieves the Xgemm kernel from the compiled binary
+ auto kernel = Kernel(program, "Xgemm");
+
+ // Sets the kernel arguments
+ kernel.SetArgument(0, static_cast<int>(m_ceiled));
+ kernel.SetArgument(1, static_cast<int>(n_ceiled));
+ kernel.SetArgument(2, static_cast<int>(k_ceiled));
+ kernel.SetArgument(3, GetRealArg(alpha));
+ kernel.SetArgument(4, GetRealArg(beta));
+ kernel.SetArgument(5, a_temp());
+ kernel.SetArgument(6, b_temp());
+ kernel.SetArgument(7, c_temp());
+
+ // Computes the global and local thread sizes
+ const auto global = std::vector<size_t>{
+ (c_one_i * db_["MDIMC"]) / db_["MWG"],
+ (c_two_i * db_["NDIMC"]) / db_["NWG"]
+ };
+ const auto local = std::vector<size_t>{db_["MDIMC"], db_["NDIMC"]};
+
+ // Launches the kernel
+ auto eventKernel = Event();
+ auto eventPointer = (!c_no_temp) ? eventKernel.pointer() : event_;
+ RunKernel(kernel, queue_, device_, global, local, eventPointer, eventWaitList);
+
+ // Runs the post-processing kernel if needed
+ if (!c_no_temp) {
+ eventWaitList.push_back(eventKernel);
+ PadCopyTransposeMatrix(queue_, device_, db_, event_, eventWaitList,
+ c_one_i, c_two_i, c_one_i, 0, c_temp,
+ c_one, c_two, c_ld, c_offset, c_buffer,
+ ConstantOne<T>(), program,
+ false, c_do_transpose, false);
+ }
}
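
The global and local sizes computed near the end of the hunk above determine how the padded C matrix is tiled over workgroups. A worked example with hypothetical tuning parameters (MWG and NWG are the tile sizes per workgroup, MDIMC and NDIMC the threads per workgroup dimension; the actual values come from the tuning database):

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical tuning parameters and already-ceiled C dimensions.
  const std::size_t MWG = 64, NWG = 64, MDIMC = 8, NDIMC = 8;
  const std::size_t c_one_i = 1024, c_two_i = 512;

  const std::size_t global_0 = (c_one_i * MDIMC) / MWG;  // 128
  const std::size_t global_1 = (c_two_i * NDIMC) / NWG;  // 64
  const std::size_t local_0 = MDIMC;                     // 8
  const std::size_t local_1 = NDIMC;                     // 8

  // Each 8 x 8 workgroup thus computes one 64 x 64 tile of the padded C matrix.
  std::printf("global = %zu x %zu, local = %zu x %zu, workgroups = %zu x %zu\n",
              global_0, global_1, local_0, local_1,
              global_0 / local_0, global_1 / local_1);   // 16 x 8
}
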
@@ -268,7 +246,7 @@ StatusCode Xgemm<T>::GemmIndirect(const size_t m, const size_t n, const size_t k
// The direct version of GEMM, requiring just one kernel, no pre or post-processing kernels.
template <typename T>
-StatusCode Xgemm<T>::GemmDirect(const size_t m, const size_t n, const size_t k,
+void Xgemm<T>::GemmDirect(const size_t m, const size_t n, const size_t k,
const T alpha,
const Buffer<T> &a_buffer, const size_t a_offset, const size_t a_ld,
const Buffer<T> &b_buffer, const size_t b_offset, const size_t b_ld,
@@ -281,46 +259,40 @@ StatusCode Xgemm<T>::GemmDirect(const size_t m, const size_t n, const size_t k,
const auto program = GetProgramFromCache(context_, PrecisionValue<T>(), routine_name_);
// Retrieves the proper XgemmDirect kernel from the compiled binary
- try {
- const auto name = (a_do_transpose) ? (b_do_transpose ? "XgemmDirectTT" : "XgemmDirectTN") :
- (b_do_transpose ? "XgemmDirectNT" : "XgemmDirectNN");
- auto kernel = Kernel(program, name);
-
- // Sets the kernel arguments
- kernel.SetArgument(0, static_cast<int>(m));
- kernel.SetArgument(1, static_cast<int>(n));
- kernel.SetArgument(2, static_cast<int>(k));
- kernel.SetArgument(3, GetRealArg(alpha));
- kernel.SetArgument(4, GetRealArg(beta));
- kernel.SetArgument(5, a_buffer());
- kernel.SetArgument(6, static_cast<int>(a_offset));
- kernel.SetArgument(7, static_cast<int>(a_ld));
- kernel.SetArgument(8, b_buffer());
- kernel.SetArgument(9, static_cast<int>(b_offset));
- kernel.SetArgument(10, static_cast<int>(b_ld));
- kernel.SetArgument(11, c_buffer());
- kernel.SetArgument(12, static_cast<int>(c_offset));
- kernel.SetArgument(13, static_cast<int>(c_ld));
- kernel.SetArgument(14, static_cast<int>(c_do_transpose));
- kernel.SetArgument(15, static_cast<int>(a_conjugate));
- kernel.SetArgument(16, static_cast<int>(b_conjugate));
-
- // Computes the global and local thread sizes
- const auto m_ceiled = Ceil(m, db_["WGD"]);
- const auto n_ceiled = Ceil(n, db_["WGD"]);
- const auto global = std::vector<size_t>{
- (m_ceiled * db_["MDIMCD"]) / db_["WGD"],
- (n_ceiled * db_["NDIMCD"]) / db_["WGD"]
- };
- const auto local = std::vector<size_t>{db_["MDIMCD"], db_["NDIMCD"]};
-
- // Launches the kernel
- auto status = RunKernel(kernel, queue_, device_, global, local, event_);
- if (ErrorIn(status)) { return status; }
-
- // Successfully finished the computation
- return StatusCode::kSuccess;
- } catch (...) { return StatusCode::kInvalidKernel; }
+ const auto name = (a_do_transpose) ? (b_do_transpose ? "XgemmDirectTT" : "XgemmDirectTN") :
+ (b_do_transpose ? "XgemmDirectNT" : "XgemmDirectNN");
+ auto kernel = Kernel(program, name);
+
+ // Sets the kernel arguments
+ kernel.SetArgument(0, static_cast<int>(m));
+ kernel.SetArgument(1, static_cast<int>(n));
+ kernel.SetArgument(2, static_cast<int>(k));
+ kernel.SetArgument(3, GetRealArg(alpha));
+ kernel.SetArgument(4, GetRealArg(beta));
+ kernel.SetArgument(5, a_buffer());
+ kernel.SetArgument(6, static_cast<int>(a_offset));
+ kernel.SetArgument(7, static_cast<int>(a_ld));
+ kernel.SetArgument(8, b_buffer());
+ kernel.SetArgument(9, static_cast<int>(b_offset));
+ kernel.SetArgument(10, static_cast<int>(b_ld));
+ kernel.SetArgument(11, c_buffer());
+ kernel.SetArgument(12, static_cast<int>(c_offset));
+ kernel.SetArgument(13, static_cast<int>(c_ld));
+ kernel.SetArgument(14, static_cast<int>(c_do_transpose));
+ kernel.SetArgument(15, static_cast<int>(a_conjugate));
+ kernel.SetArgument(16, static_cast<int>(b_conjugate));
+
+ // Computes the global and local thread sizes
+ const auto m_ceiled = Ceil(m, db_["WGD"]);
+ const auto n_ceiled = Ceil(n, db_["WGD"]);
+ const auto global = std::vector<size_t>{
+ (m_ceiled * db_["MDIMCD"]) / db_["WGD"],
+ (n_ceiled * db_["NDIMCD"]) / db_["WGD"]
+ };
+ const auto local = std::vector<size_t>{db_["MDIMCD"], db_["NDIMCD"]};
+
+ // Launches the kernel
+ RunKernel(kernel, queue_, device_, global, local, event_);
}
// =================================================================================================
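
For reference, the nested ternary in GemmDirect above selects one of four kernel variants depending on whether A and/or B need transposing. The same selection written out as a small helper (a sketch only; the library keeps it inline):

#include <cstdio>
#include <string>

// Maps the two transpose flags onto the four XgemmDirect kernel names used above.
std::string XgemmDirectKernelName(const bool a_do_transpose, const bool b_do_transpose) {
  if (a_do_transpose) { return b_do_transpose ? "XgemmDirectTT" : "XgemmDirectTN"; }
  return b_do_transpose ? "XgemmDirectNT" : "XgemmDirectNN";
}

int main() {
  std::printf("%s\n", XgemmDirectKernelName(false, false).c_str());  // XgemmDirectNN
  std::printf("%s\n", XgemmDirectKernelName(false, true).c_str());   // XgemmDirectNT
  std::printf("%s\n", XgemmDirectKernelName(true, false).c_str());   // XgemmDirectTN
  std::printf("%s\n", XgemmDirectKernelName(true, true).c_str());    // XgemmDirectTT
}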