author    Cedric Nugteren <web@cedricnugteren.nl>  2018-05-06 11:35:34 +0200
committer Cedric Nugteren <web@cedricnugteren.nl>  2018-05-06 11:35:34 +0200
commit    2d1f6ba7fe842ba938490fc599b6ebd209b6560b (patch)
tree      f1a284e5dc0163b7fed938a3efeb39432b9d3788 /src
parent    2776d761768295b01a8be7c333dbb337805d7f77 (diff)
Added convgemm skeleton, test infrastructure, and first reference implementation
Diffstat (limited to 'src')
-rw-r--r--  src/clblast.cpp                    | 20
-rw-r--r--  src/clblast_cuda.cpp               | 22
-rw-r--r--  src/routines/levelx/xconvgemm.cpp  | 68
-rw-r--r--  src/routines/levelx/xconvgemm.hpp  | 48
-rw-r--r--  src/routines/routines.hpp          |  1
-rw-r--r--  src/utilities/utilities.hpp        |  4
6 files changed, 150 insertions, 13 deletions
diff --git a/src/clblast.cpp b/src/clblast.cpp
index 026285bb..3a96136a 100644
--- a/src/clblast.cpp
+++ b/src/clblast.cpp
@@ -2254,12 +2254,20 @@ template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const si
// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/CCONVGEMM/ZCONVGEMM/HCONVGEMM
template <typename T>
-StatusCode Convgemm(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
- const cl_mem, const size_t,
- const cl_mem, const size_t,
- cl_mem, const size_t,
- cl_command_queue*, cl_event*) {
- return StatusCode::kNotImplemented;
+StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+ const cl_mem im_buffer, const size_t im_offset,
+ const cl_mem kernel_buffer, const size_t kernel_offset,
+ cl_mem result_buffer, const size_t result_offset,
+ cl_command_queue* queue, cl_event* event) {
+ try {
+ auto queue_cpp = Queue(*queue);
+ auto routine = Xconvgemm<T>(queue_cpp, event);
+ routine.DoConvgemm(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+ Buffer<T>(im_buffer), im_offset,
+ Buffer<T>(kernel_buffer), kernel_offset,
+ Buffer<T>(result_buffer), result_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
}
template StatusCode PUBLIC_API Convgemm<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
const cl_mem, const size_t,
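
Side note (not part of the commit): a minimal host-side sketch of how the new OpenCL entry point could be called, assuming an already-created cl_context and cl_command_queue and float precision. The buffer sizes assume NCHW-style packing of the image and result tensors, which is an assumption here; since the routine body still throws kNotImplemented at this commit, the call is expected to return StatusCode::kNotImplemented.

// Hypothetical usage sketch for the new OpenCL Convgemm API (error checking omitted for brevity)
#include <clblast.h>

clblast::StatusCode RunConvgemm(cl_context context, cl_command_queue queue) {
  const size_t channels = 3, height = 32, width = 32;
  const size_t kernel_h = 3, kernel_w = 3, pad_h = 1, pad_w = 1;
  const size_t stride_h = 1, stride_w = 1, dilation_h = 1, dilation_w = 1;
  const size_t num_kernels = 8, batch_count = 4;

  // Output sizes, following the same formula as DoConvgemm below
  const size_t output_h = (height + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1;
  const size_t output_w = (width + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1;

  // Device buffers (assumed NCHW packing for 'im' and 'result')
  cl_int err = CL_SUCCESS;
  auto im = clCreateBuffer(context, CL_MEM_READ_ONLY,
                           batch_count * channels * height * width * sizeof(float), nullptr, &err);
  auto kernel = clCreateBuffer(context, CL_MEM_READ_ONLY,
                               num_kernels * channels * kernel_h * kernel_w * sizeof(float), nullptr, &err);
  auto result = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
                               batch_count * num_kernels * output_h * output_w * sizeof(float), nullptr, &err);

  // At this commit the routine throws kNotImplemented, so this returns that status code
  const auto status = clblast::Convgemm<float>(channels, height, width, kernel_h, kernel_w,
                                               pad_h, pad_w, stride_h, stride_w,
                                               dilation_h, dilation_w, num_kernels, batch_count,
                                               im, 0, kernel, 0, result, 0, &queue, nullptr);
  clReleaseMemObject(im); clReleaseMemObject(kernel); clReleaseMemObject(result);
  return status;
}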
diff --git a/src/clblast_cuda.cpp b/src/clblast_cuda.cpp
index f89fb77d..5aab1626 100644
--- a/src/clblast_cuda.cpp
+++ b/src/clblast_cuda.cpp
@@ -2352,12 +2352,22 @@ template StatusCode PUBLIC_API Im2col<half>(const size_t, const size_t, const si
// Batched convolution as GEMM (non-BLAS function): SCONVGEMM/DCONVGEMM/CCONVGEMM/ZCONVGEMM/HCONVGEMM
template <typename T>
-StatusCode Convgemm(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
- const CUdeviceptr, const size_t,
- const CUdeviceptr, const size_t,
- CUdeviceptr, const size_t,
- const CUcontext, const CUdevice) {
- return StatusCode::kNotImplemented;
+StatusCode Convgemm(const size_t channels, const size_t height, const size_t width, const size_t kernel_h, const size_t kernel_w, const size_t pad_h, const size_t pad_w, const size_t stride_h, const size_t stride_w, const size_t dilation_h, const size_t dilation_w, const size_t num_kernels, const size_t batch_count,
+ const CUdeviceptr im_buffer, const size_t im_offset,
+ const CUdeviceptr kernel_buffer, const size_t kernel_offset,
+ CUdeviceptr result_buffer, const size_t result_offset,
+ const CUcontext context, const CUdevice device) {
+ try {
+ const auto context_cpp = Context(context);
+ const auto device_cpp = Device(device);
+ auto queue_cpp = Queue(context_cpp, device_cpp);
+ auto routine = Xconvgemm<T>(queue_cpp, nullptr);
+ routine.DoConvgemm(channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, num_kernels, batch_count,
+ Buffer<T>(im_buffer), im_offset,
+ Buffer<T>(kernel_buffer), kernel_offset,
+ Buffer<T>(result_buffer), result_offset);
+ return StatusCode::kSuccess;
+ } catch (...) { return DispatchException(); }
}
template StatusCode PUBLIC_API Convgemm<float>(const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t, const size_t,
const CUdeviceptr, const size_t,
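
Correspondingly, a hypothetical sketch for the CUDA API variant (not part of the commit), assuming the CUDA driver API and float precision. Unlike the OpenCL path, this variant builds its own Queue from the given context and device and runs without an event; it too is expected to return kNotImplemented at this commit.

// Hypothetical usage sketch for the CUDA Convgemm API (error checking omitted for brevity)
#include <clblast_cuda.h>
#include <cuda.h>

clblast::StatusCode RunConvgemmCuda() {
  cuInit(0);
  CUdevice device;
  cuDeviceGet(&device, 0);
  CUcontext context;
  cuCtxCreate(&context, 0, device);

  const size_t channels = 3, height = 32, width = 32, kernel_h = 3, kernel_w = 3;
  const size_t pad_h = 1, pad_w = 1, stride_h = 1, stride_w = 1, dilation_h = 1, dilation_w = 1;
  const size_t num_kernels = 8, batch_count = 4;

  // With 3x3 kernels, padding 1, stride 1 and dilation 1 the output has the same spatial size
  CUdeviceptr im, kernel, result;
  cuMemAlloc(&im, batch_count * channels * height * width * sizeof(float));
  cuMemAlloc(&kernel, num_kernels * channels * kernel_h * kernel_w * sizeof(float));
  cuMemAlloc(&result, batch_count * num_kernels * height * width * sizeof(float));

  const auto status = clblast::Convgemm<float>(channels, height, width, kernel_h, kernel_w,
                                               pad_h, pad_w, stride_h, stride_w,
                                               dilation_h, dilation_w, num_kernels, batch_count,
                                               im, 0, kernel, 0, result, 0, context, device);
  cuMemFree(im); cuMemFree(kernel); cuMemFree(result);
  cuCtxDestroy(context);
  return status;
}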
diff --git a/src/routines/levelx/xconvgemm.cpp b/src/routines/levelx/xconvgemm.cpp
new file mode 100644
index 00000000..2676dbda
--- /dev/null
+++ b/src/routines/levelx/xconvgemm.cpp
@@ -0,0 +1,68 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the Xconvgemm class (see the header for information about the class).
+//
+// =================================================================================================
+
+#include "routines/levelx/xconvgemm.hpp"
+
+#include <string>
+#include <vector>
+
+namespace clblast {
+// =================================================================================================
+
+// Constructor: forwards to base class constructor
+template <typename T>
+Xconvgemm<T>::Xconvgemm(Queue &queue, EventPointer event, const std::string &name):
+ Routine(queue, event, name, {"Copy"}, PrecisionValue<T>(), {}, {
+#include "../../kernels/levelx/im2col.opencl"
+ }) {
+}
+
+// =================================================================================================
+
+template <typename T>
+void Xconvgemm<T>::DoConvgemm(const size_t channels, const size_t height, const size_t width,
+ const size_t kernel_h, const size_t kernel_w, const size_t pad_h,
+ const size_t pad_w, const size_t stride_h, const size_t stride_w,
+ const size_t dilation_h, const size_t dilation_w,
+ const size_t num_kernels, const size_t batch_count,
+ const Buffer<T> &im_buffer, const size_t im_offset,
+ const Buffer<T> &kernel_buffer, const size_t kernel_offset,
+ const Buffer<T> &result_buffer, const size_t result_offset) {
+
+ // Makes sure all dimensions are larger than zero
+ if ((channels == 0) || (height == 0) || (width == 0) || (num_kernels == 0) || (batch_count == 0)) {
+ throw BLASError(StatusCode::kInvalidDimension);
+ }
+
+ // Sets the output height and width
+ const auto size_h = height + 2 * pad_h;
+ const auto padding_h = dilation_h * (kernel_h - 1) + 1;
+ const auto output_h = (size_h >= padding_h) ? (size_h - padding_h) / stride_h + 1 : 1;
+ const auto size_w = width + 2 * pad_w;
+ const auto padding_w = dilation_w * (kernel_w - 1) + 1;
+ const auto output_w = (size_w >= padding_w) ? (size_w - padding_w) / stride_w + 1 : 1;
+
+ throw BLASError(StatusCode::kNotImplemented);
+}
+
+// =================================================================================================
+
+// Compiles the templated class
+template class Xconvgemm<half>;
+template class Xconvgemm<float>;
+template class Xconvgemm<double>;
+template class Xconvgemm<float2>;
+template class Xconvgemm<double2>;
+
+// =================================================================================================
+} // namespace clblast
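
For reference (not part of the commit), the output-size computation in DoConvgemm can be checked with a small standalone snippet: the padded input size minus the effective (dilated) kernel extent, divided by the stride, plus one.

// Standalone check of the output-size formula used in DoConvgemm above
#include <cassert>
#include <cstddef>

std::size_t OutputSize(const std::size_t in, const std::size_t pad, const std::size_t kernel,
                       const std::size_t stride, const std::size_t dilation) {
  const auto size = in + 2 * pad;                    // padded input size
  const auto extent = dilation * (kernel - 1) + 1;   // effective kernel extent
  return (size >= extent) ? (size - extent) / stride + 1 : 1;
}

int main() {
  assert(OutputSize(32, 1, 3, 1, 1) == 32);  // 'same' convolution: (32 + 2 - 3) / 1 + 1 = 32
  assert(OutputSize(32, 0, 3, 2, 1) == 15);  // strided: (32 - 3) / 2 + 1 = 15
  assert(OutputSize(32, 0, 3, 1, 2) == 28);  // dilated: (32 - 5) / 1 + 1 = 28
  return 0;
}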
diff --git a/src/routines/levelx/xconvgemm.hpp b/src/routines/levelx/xconvgemm.hpp
new file mode 100644
index 00000000..01795ea8
--- /dev/null
+++ b/src/routines/levelx/xconvgemm.hpp
@@ -0,0 +1,48 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+// Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements the Xconvgemm routine. The precision is implemented as a template argument.
+// It implements batched convolution of a 4D input 'image' tensor with a 3D input 'kernel' matrix,
+// resulting in a 4D output 'result' tensor.
+//
+// =================================================================================================
+
+#ifndef CLBLAST_ROUTINES_XCONVGEMM_H_
+#define CLBLAST_ROUTINES_XCONVGEMM_H_
+
+#include "routine.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// See comment at top of file for a description of the class
+template <typename T>
+class Xconvgemm: public Routine {
+ public:
+
+ // Constructor
+ Xconvgemm(Queue &queue, EventPointer event, const std::string &name = "CONVGEMM");
+
+ // Templated-precision implementation of the routine
+ void DoConvgemm(const size_t channels, const size_t height, const size_t width,
+ const size_t kernel_h, const size_t kernel_w,
+ const size_t pad_h, const size_t pad_w,
+ const size_t stride_h, const size_t stride_w,
+ const size_t dilation_h, const size_t dilation_w,
+ const size_t num_kernels, const size_t batch_count,
+ const Buffer<T> &im_buffer, const size_t im_offset,
+ const Buffer<T> &kernel_buffer, const size_t kernel_offset,
+ const Buffer<T> &result_buffer, const size_t result_offset);
+};
+
+// =================================================================================================
+} // namespace clblast
+
+// CLBLAST_ROUTINES_XCONVGEMM_H_
+#endif
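
To make the intended semantics concrete, here is a hypothetical naive host-side reference (not part of the commit) of the operation the class is meant to compute. It assumes NCHW layout for the image and result tensors, a num_kernels x channels x kernel_h x kernel_w kernel layout, zero padding, and a cross-correlation convention; the output sizes follow the same formula as in DoConvgemm. These layout choices are assumptions for illustration only.

// Hypothetical naive reference for the convgemm operation (float/double), for illustration
#include <cstddef>
#include <vector>

template <typename T>
void NaiveConvgemm(const std::size_t channels, const std::size_t height, const std::size_t width,
                   const std::size_t kernel_h, const std::size_t kernel_w,
                   const std::size_t pad_h, const std::size_t pad_w,
                   const std::size_t stride_h, const std::size_t stride_w,
                   const std::size_t dilation_h, const std::size_t dilation_w,
                   const std::size_t num_kernels, const std::size_t batch_count,
                   const std::vector<T> &im, const std::vector<T> &kernel,
                   std::vector<T> &result) {
  const auto output_h = (height + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1;
  const auto output_w = (width + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1;
  for (std::size_t b = 0; b < batch_count; ++b) {
    for (std::size_t k = 0; k < num_kernels; ++k) {
      for (std::size_t oh = 0; oh < output_h; ++oh) {
        for (std::size_t ow = 0; ow < output_w; ++ow) {
          T sum = 0;
          for (std::size_t c = 0; c < channels; ++c) {
            for (std::size_t kh = 0; kh < kernel_h; ++kh) {
              for (std::size_t kw = 0; kw < kernel_w; ++kw) {
                // Input coordinates, taking padding, stride, and dilation into account
                const auto ih = static_cast<std::ptrdiff_t>(oh * stride_h + kh * dilation_h) -
                                static_cast<std::ptrdiff_t>(pad_h);
                const auto iw = static_cast<std::ptrdiff_t>(ow * stride_w + kw * dilation_w) -
                                static_cast<std::ptrdiff_t>(pad_w);
                if (ih < 0 || iw < 0 || ih >= static_cast<std::ptrdiff_t>(height) ||
                    iw >= static_cast<std::ptrdiff_t>(width)) { continue; }  // zero padding
                const auto im_index = ((b * channels + c) * height + static_cast<std::size_t>(ih)) *
                                      width + static_cast<std::size_t>(iw);
                const auto k_index = ((k * channels + c) * kernel_h + kh) * kernel_w + kw;
                sum += im[im_index] * kernel[k_index];
              }
            }
          }
          result[((b * num_kernels + k) * output_h + oh) * output_w + ow] = sum;
        }
      }
    }
  }
}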
diff --git a/src/routines/routines.hpp b/src/routines/routines.hpp
index 2ab16a75..e080ed47 100644
--- a/src/routines/routines.hpp
+++ b/src/routines/routines.hpp
@@ -70,6 +70,7 @@
#include "routines/levelx/xhad.hpp"
#include "routines/levelx/xomatcopy.hpp"
#include "routines/levelx/xim2col.hpp"
+#include "routines/levelx/xconvgemm.hpp"
#include "routines/levelx/xaxpybatched.hpp"
#include "routines/levelx/xgemmbatched.hpp"
#include "routines/levelx/xgemmstridedbatched.hpp"
diff --git a/src/utilities/utilities.hpp b/src/utilities/utilities.hpp
index 0edf77fe..2d2cd62e 100644
--- a/src/utilities/utilities.hpp
+++ b/src/utilities/utilities.hpp
@@ -84,6 +84,7 @@ constexpr auto kArgImaxOffset = "offimax";
constexpr auto kArgAlpha = "alpha";
constexpr auto kArgBeta = "beta";
constexpr auto kArgBatchCount = "batch_num";
+constexpr auto kArgNumKernels = "num_kernels";
// Constants for im2col
constexpr auto kArgChannels = "channels";
@@ -195,7 +196,7 @@ struct Arguments {
size_t imax_offset = 0;
T alpha = ConstantOne<T>();
T beta = ConstantOne<T>();
- // Arguments for im2col
+ // Arguments for im2col and convgemm
size_t channels = 1;
size_t height = 1;
size_t width = 1;
@@ -207,6 +208,7 @@ struct Arguments {
size_t stride_w = 1;
size_t dilation_h = 1;
size_t dilation_w = 1;
+ size_t num_kernels = 1;
// Batch-specific arguments
size_t batch_count = 1;
std::vector<size_t> x_offsets; // = {0};