author    Cedric Nugteren <web@cedricnugteren.nl>    2017-03-19 16:04:04 +0100
committer Cedric Nugteren <web@cedricnugteren.nl>    2017-03-19 16:04:04 +0100
commit    c27d2f0c1ea69820f39d440f307c7bc3f97472c4 (patch)
tree      772e8166917905ec800586ca6e2b9e120c3ebf78
parent    2fd04dae83acb01933856e768a938db9ac808ce0 (diff)
Added an (optional) non-direct implementation of the batched GEMM routine
-rw-r--r--  src/kernels/level3/xgemm_batched.opencl        |  70
-rw-r--r--  src/kernels/level3/xgemm_direct_batched.opencl |  12
-rw-r--r--  src/routines/levelx/xgemmbatched.cpp           | 220
-rw-r--r--  src/routines/levelx/xgemmbatched.hpp           |  25
4 files changed, 310 insertions(+), 17 deletions(-)
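
For context, each batch entry computes an independent GEMM with its own alpha, beta, and buffer offset inside a single shared buffer per matrix. The following host-side reference is a minimal sketch of that semantics, not CLBlast code: the function name is illustrative, and column-major layout with non-transposed A and B is assumed.

#include <cstddef>
#include <vector>

// Reference semantics of batched GEMM: C_i = alpha_i * A_i * B_i + beta_i * C_i,
// where matrix i starts at its per-batch offset inside one shared buffer.
template <typename T>
void ReferenceGemmBatched(const std::size_t m, const std::size_t n, const std::size_t k,
                          const std::vector<T> &alphas, const std::vector<T> &betas,
                          const std::vector<T> &a, const std::vector<std::size_t> &a_offsets, const std::size_t a_ld,
                          const std::vector<T> &b, const std::vector<std::size_t> &b_offsets, const std::size_t b_ld,
                          std::vector<T> &c, const std::vector<std::size_t> &c_offsets, const std::size_t c_ld,
                          const std::size_t batch_count) {
  for (std::size_t batch = 0; batch < batch_count; ++batch) {
    for (std::size_t j = 0; j < n; ++j) {
      for (std::size_t i = 0; i < m; ++i) {
        auto acc = T{0};
        for (std::size_t l = 0; l < k; ++l) {
          acc += a[a_offsets[batch] + l * a_ld + i] * b[b_offsets[batch] + j * b_ld + l];
        }
        auto &c_value = c[c_offsets[batch] + j * c_ld + i];
        c_value = alphas[batch] * acc + betas[batch] * c_value;
      }
    }
  }
}
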
diff --git a/src/kernels/level3/xgemm_batched.opencl b/src/kernels/level3/xgemm_batched.opencl
new file mode 100644
index 00000000..c7bf10d5
--- /dev/null
+++ b/src/kernels/level3/xgemm_batched.opencl
@@ -0,0 +1,70 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file contains the batched version of the non-direct GEMM kernel. See part 1 for information
+// about the non-batched version of the kernel.
+//
+// =================================================================================================
+
+// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
+// literal). Comment-out this line for syntax-highlighting when developing.
+R"(
+
+// =================================================================================================
+
+// Main entry point of the kernel. This is the regular full version.
+__kernel __attribute__((reqd_work_group_size(MDIMC, NDIMC, 1)))
+void XgemmBatched(const int kSizeM, const int kSizeN, const int kSizeK,
+ const __constant real_arg* arg_alphas,
+ const __constant real_arg* arg_betas,
+ const __global realM* restrict agm, const int a_one, const int a_two,
+ const __global realN* restrict bgm, const int b_one, const int b_two,
+ __global realM* cgm, const int c_one, const int c_two) {
+ const int batch = get_group_id(2);
+ const real alpha = GetRealArg(arg_alphas[batch]);
+ const real beta = GetRealArg(arg_betas[batch]);
+
+ // Sets the offsets
+ const int a_offset = batch * a_one * a_two;
+ const int b_offset = batch * b_one * b_two;
+ const int c_offset = batch * c_one * c_two;
+ const __global realM* restrict agm_ = &agm[a_offset / VWM];
+ const __global realN* restrict bgm_ = &bgm[b_offset / VWN];
+ __global realM* restrict cgm_ = &cgm[c_offset / VWM];
+
+ // Allocates workgroup-private memory (local memory)
+ #if SA == 1
+ __local realM alm[KWG * MWG/VWM];
+ #endif
+ #if SB == 1
+ __local realN blm[KWG * NWG/VWN];
+ #endif
+
+ // Computes the matrix-multiplication and stores the result in register memory
+ realM cpm[NWI][MWI/VWM];
+ #if SA == 1 && SB == 1
+ XgemmBody(kSizeM, kSizeN, kSizeK, agm_, bgm_, cgm_, cpm, alm, blm);
+ #elif SA == 1
+ XgemmBody(kSizeM, kSizeN, kSizeK, agm_, bgm_, cgm_, cpm, alm);
+ #elif SB == 1
+ XgemmBody(kSizeM, kSizeN, kSizeK, agm_, bgm_, cgm_, cpm, blm);
+ #else
+ XgemmBody(kSizeM, kSizeN, kSizeK, agm_, bgm_, cgm_, cpm);
+ #endif
+
+ // Stores an MWG * NWG tile of results and performs the multiplication with alpha and beta
+ StoreResults(cgm_, cpm, kSizeM, alpha, beta);
+}
+
+// =================================================================================================
+
+// End of the C++11 raw string literal
+)"
+
+// =================================================================================================
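
The kernel above obtains its batch index from get_group_id(2), so the host enqueues a three-dimensional NDRange with one plane of work-groups per batch. Below is a minimal sketch of the launch geometry, mirroring the host code later in this commit; the tuning values are illustrative stand-ins for the routine's database (db_), Ceil stands in for CLBlast's rounding helper, and C is assumed not rotated so that c_one_i equals m_ceiled and c_two_i equals n_ceiled.

#include <cstddef>
#include <vector>

// Rounds x up to the next multiple of 'multiple'.
std::size_t Ceil(const std::size_t x, const std::size_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  // Illustrative tuning parameters; the real values come from the routine's database.
  const std::size_t MWG = 64, NWG = 64, MDIMC = 8, NDIMC = 8, VWM = 4, VWN = 4;
  const std::size_t m = 100, n = 100, batch_count = 16;

  // Padded problem sizes, as computed in BatchedGemmIndirect below.
  const std::size_t m_ceiled = Ceil(Ceil(m, MWG), VWM);
  const std::size_t n_ceiled = Ceil(Ceil(n, NWG), VWN);

  // Each work-group computes one MWG x NWG tile of one batch entry; dimension 2 of the
  // NDRange selects the batch, matching get_group_id(2) inside the kernel.
  const std::vector<std::size_t> global{(m_ceiled * MDIMC) / MWG,
                                        (n_ceiled * NDIMC) / NWG,
                                        batch_count};
  const std::vector<std::size_t> local{MDIMC, NDIMC, 1};
  (void)global; (void)local;  // in the real routine these feed RunKernel(...)
  return 0;
}
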
diff --git a/src/kernels/level3/xgemm_direct_batched.opencl b/src/kernels/level3/xgemm_direct_batched.opencl
index 3377d5c7..fa582cff 100644
--- a/src/kernels/level3/xgemm_direct_batched.opencl
+++ b/src/kernels/level3/xgemm_direct_batched.opencl
@@ -7,8 +7,8 @@
// Author(s):
// Cedric Nugteren <www.cedricnugteren.nl>
//
-// This file contains the batched version of the GEMM kernels. See part 1 for information about the
-// non-batched version of the kernel.
+// This file contains the batched version of the direct GEMM kernels. See part 1 for information
+// about the non-batched version of the kernel.
//
// =================================================================================================
@@ -18,7 +18,7 @@ R"(
// =================================================================================================
-// Direct version of the GEMM kernel with [A, B] = [non-transposed, non-transposed]
+// Direct version of the batched GEMM kernel with [A, B] = [non-transposed, non-transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
@@ -39,7 +39,7 @@ __kernel void XgemmDirectBatchedNN(const int kSizeM, const int kSizeN, const int
alm, blm, 0, 0, c_transpose, a_conjugate, b_conjugate);
}
-// Direct version of the GEMM kernel with [A, B] = [non-transposed, transposed]
+// Direct version of the batched GEMM kernel with [A, B] = [non-transposed, transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
@@ -60,7 +60,7 @@ __kernel void XgemmDirectBatchedNT(const int kSizeM, const int kSizeN, const int
alm, blm, 0, 1, c_transpose, a_conjugate, b_conjugate);
}
-// Direct version of the GEMM kernel with [A, B] = [transposed, non-transposed]
+// Direct version of the batched GEMM kernel with [A, B] = [transposed, non-transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
@@ -81,7 +81,7 @@ __kernel void XgemmDirectBatchedTN(const int kSizeM, const int kSizeN, const int
alm, blm, 1, 0, c_transpose, a_conjugate, b_conjugate);
}
-// Direct version of the GEMM kernel with [A, B] = [transposed, transposed]
+// Direct version of the batched GEMM kernel with [A, B] = [transposed, transposed]
__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
__kernel void XgemmDirectBatchedTT(const int kSizeM, const int kSizeN, const int kSizeK,
const __constant real_arg* arg_alphas, const __constant real_arg* arg_betas,
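
All of these batched kernels receive their per-batch scalars as __constant real_arg arrays and their per-batch offsets as 32-bit integer device buffers, so the host first narrows the user-supplied size_t offsets before uploading them, as DoGemmBatched does in the next file. A one-line sketch of that conversion, with an illustrative helper name:

#include <cstddef>
#include <vector>

// Narrows size_t offsets to the int offsets expected by the batched kernels.
std::vector<int> NarrowOffsets(const std::vector<std::size_t> &offsets) {
  return std::vector<int>(offsets.begin(), offsets.end());
}
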
diff --git a/src/routines/levelx/xgemmbatched.cpp b/src/routines/levelx/xgemmbatched.cpp
index a11ebfd0..0fea1922 100644
--- a/src/routines/levelx/xgemmbatched.cpp
+++ b/src/routines/levelx/xgemmbatched.cpp
@@ -22,11 +22,24 @@ namespace clblast {
// Constructor: forwards to base class constructor
template <typename T>
XgemmBatched<T>::XgemmBatched(Queue &queue, EventPointer event, const std::string &name):
- Routine(queue, event, name, {"XgemmDirect"}, PrecisionValue<T>(), {}, {
+ Routine(queue, event, name,
+ {"Copy","Pad","Transpose","Padtranspose","Xgemm","XgemmDirect","KernelSelection"},
+ PrecisionValue<T>(), {}, {
+ #include "../../kernels/level3/level3.opencl"
+ #include "../../kernels/level3/copy_fast.opencl"
+ #include "../../kernels/level3/copy_pad.opencl"
+ #include "../../kernels/level3/transpose_fast.opencl"
+ #include "../../kernels/level3/transpose_pad.opencl"
+ , // separated in multiple parts to prevent C1091 in MSVC 2013
#include "../../kernels/level3/xgemm_direct_part1.opencl"
#include "../../kernels/level3/xgemm_direct_part2.opencl"
#include "../../kernels/level3/xgemm_direct_part3.opencl"
, // separated in multiple parts to prevent C1091 in MSVC 2013
+ #include "../../kernels/level3/xgemm_part1.opencl"
+ #include "../../kernels/level3/xgemm_part2.opencl"
+ #include "../../kernels/level3/xgemm_part3.opencl"
+ , // separated in multiple parts to prevent C1091 in MSVC 2013
+ #include "../../kernels/level3/xgemm_batched.opencl"
#include "../../kernels/level3/xgemm_direct_batched.opencl"
}) {
}
@@ -86,20 +99,205 @@ void XgemmBatched<T>::DoGemmBatched(const Layout layout, const Transpose a_trans
TestMatrixC(c_one, c_two, c_buffer, c_offsets[batch], c_ld);
}
- // Upload the arguments to the device
+ // Upload the scalar arguments to the device
+ auto alphas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
+ auto betas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
+ alphas_device.Write(queue_, batch_count, alphas);
+ betas_device.Write(queue_, batch_count, betas);
+
+ // Converts the offset to integers
std::vector<int> a_offsets_int(a_offsets.begin(), a_offsets.end());
std::vector<int> b_offsets_int(b_offsets.begin(), b_offsets.end());
std::vector<int> c_offsets_int(c_offsets.begin(), c_offsets.end());
+
+ // Selects which version of the batched GEMM to run
+ const auto do_gemm_direct = true;
+ if (do_gemm_direct) { // single generic kernel
+ BatchedGemmDirect(m, n, k, alphas_device,
+ a_buffer, a_offsets_int, a_ld, b_buffer, b_offsets_int, b_ld,
+ betas_device, c_buffer, c_offsets_int, c_ld,
+ a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate,
+ batch_count);
+ }
+ else { // pre/post-processing plus a very fast kernel
+ BatchedGemmIndirect(m, n, k, alphas_device,
+ a_buffer, a_offsets_int, a_ld, b_buffer, b_offsets_int, b_ld,
+ betas_device, c_buffer, c_offsets_int, c_ld,
+ a_do_transpose, b_do_transpose, c_do_transpose, a_conjugate, b_conjugate,
+ a_one, a_two, a_want_rotated,
+ b_one, b_two, b_want_rotated,
+ c_one, c_two, c_want_rotated,
+ batch_count);
+ }
+}
+
+
+// =================================================================================================
+
+// The indirect version of batched GEMM. This uses the faster but non-general kernel. It has specific
+// requirements, but several pre- and post-processing kernels take care of those. However, the
+// overhead of these extra kernels might not be ideal for certain devices/arguments.
+template <typename T>
+void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const size_t k,
+ const Buffer<T> &alphas,
+ const Buffer<T> &a_buffer, const std::vector<int> &a_offsets, const size_t a_ld,
+ const Buffer<T> &b_buffer, const std::vector<int> &b_offsets, const size_t b_ld,
+ const Buffer<T> &betas,
+ const Buffer<T> &c_buffer, const std::vector<int> &c_offsets, const size_t c_ld,
+ const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+ const bool a_conjugate, const bool b_conjugate,
+ const size_t a_one, const size_t a_two, const bool a_want_rotated,
+ const size_t b_one, const size_t b_two, const bool b_want_rotated,
+ const size_t c_one, const size_t c_two, const bool c_want_rotated,
+ const size_t batch_count) {
+ // Calculates the ceiled versions of m, n, and k
+ const auto m_ceiled = Ceil(Ceil(m, db_["MWG"]), db_["VWM"]);
+ const auto n_ceiled = Ceil(Ceil(n, db_["NWG"]), db_["VWN"]);
+ const auto k_ceiled = Ceil(Ceil(k, db_["KWG"]), db_["VWM"]);
+
+ // Computes the first and second "internal" (ceiled) dimensions of the 3 matrices taking into account
+ // whether the matrices need to be rotated or not for the kernel.
+ const auto a_one_i = (a_want_rotated) ? k_ceiled : m_ceiled;
+ const auto a_two_i = (a_want_rotated) ? m_ceiled : k_ceiled;
+ const auto b_one_i = (b_want_rotated) ? n_ceiled : k_ceiled;
+ const auto b_two_i = (b_want_rotated) ? k_ceiled : n_ceiled;
+ const auto c_one_i = (c_want_rotated) ? n_ceiled : m_ceiled;
+ const auto c_two_i = (c_want_rotated) ? m_ceiled : n_ceiled;
+
+ // Sets the "internal" offsets, i.e. the perfect offsets
+ auto a_offsets_i = std::vector<int>(batch_count);
+ auto b_offsets_i = std::vector<int>(batch_count);
+ auto c_offsets_i = std::vector<int>(batch_count);
+ for (auto batch = size_t{0}; batch < batch_count; ++batch) {
+ a_offsets_i[batch] = batch * a_one_i * a_two_i;
+ b_offsets_i[batch] = batch * b_one_i * b_two_i;
+ c_offsets_i[batch] = batch * c_one_i * c_two_i;
+ }
+
+ // Determines whether or not temporary matrices are needed
+ auto a_no_temp = a_one == a_one_i && a_two == a_two_i && a_ld == a_one && a_offsets == a_offsets_i &&
+ a_do_transpose == false && a_conjugate == false;
+ auto b_no_temp = b_one == b_one_i && b_two == b_two_i && b_ld == b_one && b_offsets == b_offsets_i &&
+ b_do_transpose == false && b_conjugate == false;
+ auto c_no_temp = c_one == c_one_i && c_two == c_two_i && c_ld == c_one && c_offsets == c_offsets_i &&
+ c_do_transpose == false;
+
+ // Creates the temporary matrices
+ const auto a_temp = (a_no_temp) ? a_buffer : Buffer<T>(context_, batch_count * a_one_i * a_two_i);
+ const auto b_temp = (b_no_temp) ? b_buffer : Buffer<T>(context_, batch_count * b_one_i * b_two_i);
+ const auto c_temp = (c_no_temp) ? c_buffer : Buffer<T>(context_, batch_count * c_one_i * c_two_i);
+
+ // Events of all kernels (including pre/post processing kernels)
+ auto eventWaitList = std::vector<Event>();
+ auto emptyEventList = std::vector<Event>();
+
+ // Runs the pre-processing kernel for matrix A. This transposes the matrix, but also pads it with
+ // zeros until it reaches a certain multiple of size (kernel-parameter dependent). If nothing has
+ // to be done, these kernels can be skipped.
+ if (!a_no_temp) {
+ auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ auto a_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ a_offsets_device.Write(queue_, batch_count, a_offsets);
+ a_offsets_i_device.Write(queue_, batch_count, a_offsets_i);
+ auto eventProcessA = Event();
+ PadCopyTransposeMatrixBatched(queue_, device_, db_, eventProcessA.pointer(), emptyEventList,
+ a_one, a_two, a_ld, a_offsets_device, a_buffer,
+ a_one_i, a_two_i, a_one_i, a_offsets_i_device, a_temp,
+ program_, true, a_do_transpose, a_conjugate, batch_count);
+ eventWaitList.push_back(eventProcessA);
+ }
+
+ // As above, but now for matrix B
+ if (!b_no_temp) {
+ auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ auto b_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ b_offsets_device.Write(queue_, batch_count, b_offsets);
+ b_offsets_i_device.Write(queue_, batch_count, b_offsets_i);
+ auto eventProcessB = Event();
+ PadCopyTransposeMatrixBatched(queue_, device_, db_, eventProcessB.pointer(), emptyEventList,
+ b_one, b_two, b_ld, b_offsets_device, b_buffer,
+ b_one_i, b_two_i, b_one_i, b_offsets_i_device, b_temp,
+ program_, true, b_do_transpose, b_conjugate, batch_count);
+ eventWaitList.push_back(eventProcessB);
+ }
+
+ // As above, but now for matrix C
+ auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ auto c_offsets_i_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
+ if (!c_no_temp) {
+ c_offsets_device.Write(queue_, batch_count, c_offsets);
+ c_offsets_i_device.Write(queue_, batch_count, c_offsets_i);
+ auto eventProcessC = Event();
+ PadCopyTransposeMatrixBatched(queue_, device_, db_, eventProcessC.pointer(), emptyEventList,
+ c_one, c_two, c_ld, c_offsets_device, c_buffer,
+ c_one_i, c_two_i, c_one_i, c_offsets_i_device, c_temp,
+ program_, true, c_do_transpose, false, batch_count);
+ eventWaitList.push_back(eventProcessC);
+ }
+
+ // Retrieves the Xgemm kernel from the compiled binary
+ auto kernel = Kernel(program_, "XgemmBatched");
+
+ // Sets the kernel arguments
+ kernel.SetArgument(0, static_cast<int>(m_ceiled));
+ kernel.SetArgument(1, static_cast<int>(n_ceiled));
+ kernel.SetArgument(2, static_cast<int>(k_ceiled));
+ kernel.SetArgument(3, alphas());
+ kernel.SetArgument(4, betas());
+ kernel.SetArgument(5, a_temp());
+ kernel.SetArgument(6, static_cast<int>(a_one_i));
+ kernel.SetArgument(7, static_cast<int>(a_two_i));
+ kernel.SetArgument(8, b_temp());
+ kernel.SetArgument(9, static_cast<int>(b_one_i));
+ kernel.SetArgument(10, static_cast<int>(b_two_i));
+ kernel.SetArgument(11, c_temp());
+ kernel.SetArgument(12, static_cast<int>(c_one_i));
+ kernel.SetArgument(13, static_cast<int>(c_two_i));
+
+ // Computes the global and local thread sizes
+ const auto global = std::vector<size_t>{
+ (c_one_i * db_["MDIMC"]) / db_["MWG"],
+ (c_two_i * db_["NDIMC"]) / db_["NWG"],
+ batch_count
+ };
+ const auto local = std::vector<size_t>{db_["MDIMC"], db_["NDIMC"], 1};
+
+ // Launches the kernel
+ auto eventKernel = Event();
+ auto eventPointer = eventKernel.pointer();
+ RunKernel(kernel, queue_, device_, global, local, eventPointer, eventWaitList);
+
+ // Runs the post-processing kernel if needed
+ if (!c_no_temp) {
+ eventWaitList.push_back(eventKernel);
+ PadCopyTransposeMatrixBatched(queue_, device_, db_, event_, eventWaitList,
+ c_one_i, c_two_i, c_one_i, c_offsets_i_device, c_temp,
+ c_one, c_two, c_ld, c_offsets_device, c_buffer,
+ program_, false, c_do_transpose, false, batch_count);
+ }
+}
+
+// =================================================================================================
+
+// The direct version of batched GEMM, requiring just one kernel and no pre- or post-processing.
+template <typename T>
+void XgemmBatched<T>::BatchedGemmDirect(const size_t m, const size_t n, const size_t k,
+ const Buffer<T> &alphas,
+ const Buffer<T> &a_buffer, const std::vector<int> &a_offsets, const size_t a_ld,
+ const Buffer<T> &b_buffer, const std::vector<int> &b_offsets, const size_t b_ld,
+ const Buffer<T> &betas,
+ const Buffer<T> &c_buffer, const std::vector<int> &c_offsets, const size_t c_ld,
+ const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+ const bool a_conjugate, const bool b_conjugate,
+ const size_t batch_count) {
+
+ // Uploads the offsets to the device
auto a_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto b_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto c_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
- auto alphas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
- auto betas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
- a_offsets_device.Write(queue_, batch_count, a_offsets_int);
- b_offsets_device.Write(queue_, batch_count, b_offsets_int);
- c_offsets_device.Write(queue_, batch_count, c_offsets_int);
- alphas_device.Write(queue_, batch_count, alphas);
- betas_device.Write(queue_, batch_count, betas);
+ a_offsets_device.Write(queue_, batch_count, a_offsets);
+ b_offsets_device.Write(queue_, batch_count, b_offsets);
+ c_offsets_device.Write(queue_, batch_count, c_offsets);
// Retrieves the proper XgemmDirect kernel from the compiled binary
const auto name = (a_do_transpose) ? (b_do_transpose ? "XgemmDirectBatchedTT" : "XgemmDirectBatchedTN") :
@@ -110,8 +308,8 @@ void XgemmBatched<T>::DoGemmBatched(const Layout layout, const Transpose a_trans
kernel.SetArgument(0, static_cast<int>(m));
kernel.SetArgument(1, static_cast<int>(n));
kernel.SetArgument(2, static_cast<int>(k));
- kernel.SetArgument(3, alphas_device());
- kernel.SetArgument(4, betas_device());
+ kernel.SetArgument(3, alphas());
+ kernel.SetArgument(4, betas());
kernel.SetArgument(5, a_buffer());
kernel.SetArgument(6, a_offsets_device());
kernel.SetArgument(7, static_cast<int>(a_ld));
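
A note on the fast path of BatchedGemmIndirect above: the pad/transpose kernels are only skipped when a matrix already has the layout the XgemmBatched kernel assumes, that is, ceiled dimensions, a leading dimension equal to the first dimension, densely packed batches, and no transpose or conjugation. The following sketch restates those two checks with illustrative helper names (the C matrix additionally ignores the conjugation flag):

#include <cstddef>
#include <vector>

// Per-batch "perfect" offsets assumed by the XgemmBatched kernel: batch i starts right
// after i densely packed (one_i x two_i) matrices (compare a_offset in the kernel).
std::vector<int> PerfectOffsets(const std::size_t one_i, const std::size_t two_i,
                                const std::size_t batch_count) {
  auto offsets = std::vector<int>(batch_count);
  for (std::size_t batch = 0; batch < batch_count; ++batch) {
    offsets[batch] = static_cast<int>(batch * one_i * two_i);
  }
  return offsets;
}

// A matrix can skip the pad/transpose pre-processing only if it already matches that layout.
bool NoTempNeeded(const std::size_t one, const std::size_t two, const std::size_t ld,
                  const std::vector<int> &offsets,
                  const std::size_t one_i, const std::size_t two_i,
                  const std::vector<int> &offsets_i,
                  const bool do_transpose, const bool conjugate) {
  return one == one_i && two == two_i && ld == one && offsets == offsets_i &&
         !do_transpose && !conjugate;
}
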
diff --git a/src/routines/levelx/xgemmbatched.hpp b/src/routines/levelx/xgemmbatched.hpp
index 710011d8..6136dd5f 100644
--- a/src/routines/levelx/xgemmbatched.hpp
+++ b/src/routines/levelx/xgemmbatched.hpp
@@ -38,6 +38,31 @@ class XgemmBatched: public Routine {
const std::vector<T> &betas,
const Buffer<T> & c_buffer, const std::vector<size_t> &c_offsets, const size_t c_ld,
const size_t batch_count);
+
+ // Indirect version of batched GEMM (with pre- and post-processing kernels)
+ void BatchedGemmIndirect(const size_t m, const size_t n, const size_t k,
+ const Buffer<T> &alphas,
+ const Buffer<T> &a_buffer, const std::vector<int> &a_offsets, const size_t a_ld,
+ const Buffer<T> &b_buffer, const std::vector<int> &b_offsets, const size_t b_ld,
+ const Buffer<T> &betas,
+ const Buffer<T> &c_buffer, const std::vector<int> &c_offsets, const size_t c_ld,
+ const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+ const bool a_conjugate, const bool b_conjugate,
+ const size_t a_one, const size_t a_two, const bool a_want_rotated,
+ const size_t b_one, const size_t b_two, const bool b_want_rotated,
+ const size_t c_one, const size_t c_two, const bool c_want_rotated,
+ const size_t batch_count);
+
+ // Direct version of batched GEMM (without pre- and post-processing kernels)
+ void BatchedGemmDirect(const size_t m, const size_t n, const size_t k,
+ const Buffer<T> &alphas,
+ const Buffer<T> &a_buffer, const std::vector<int> &a_offsets, const size_t a_ld,
+ const Buffer<T> &b_buffer, const std::vector<int> &b_offsets, const size_t b_ld,
+ const Buffer<T> &betas,
+ const Buffer<T> &c_buffer, const std::vector<int> &c_offsets, const size_t c_ld,
+ const bool a_do_transpose, const bool b_do_transpose, const bool c_do_transpose,
+ const bool a_conjugate, const bool b_conjugate,
+ const size_t batch_count);
};
// =================================================================================================
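
For reference, the direct path dispatches to one of the four specialized kernels purely based on the transpose flags of A and B, while conjugation and the row/column-major layout are handled through kernel arguments. A sketch mirroring that selection, with an illustrative helper name:

#include <string>

// Mirrors the kernel-name selection in BatchedGemmDirect: one specialized kernel per
// combination of transposed/non-transposed A and B.
std::string DirectBatchedKernelName(const bool a_do_transpose, const bool b_do_transpose) {
  return a_do_transpose ? (b_do_transpose ? "XgemmDirectBatchedTT" : "XgemmDirectBatchedTN")
                        : (b_do_transpose ? "XgemmDirectBatchedNT" : "XgemmDirectBatchedNN");
}
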