From 2d1f6ba7fe842ba938490fc599b6ebd209b6560b Mon Sep 17 00:00:00 2001
From: Cedric Nugteren <web@cedricnugteren.nl>
Date: Sun, 6 May 2018 11:35:34 +0200
Subject: Added convgemm skeleton, test infrastructure, and first reference
 implementation

---
 test/correctness/routines/levelx/xconvgemm.cpp |  26 +++
 test/correctness/testblas.hpp                  |  10 +-
 test/performance/routines/levelx/xconvgemm.cpp |  33 ++++
 test/routines/levelx/xconvgemm.hpp             | 219 +++++++++++++++++++++++++
 4 files changed, 286 insertions(+), 2 deletions(-)
 create mode 100644 test/correctness/routines/levelx/xconvgemm.cpp
 create mode 100644 test/performance/routines/levelx/xconvgemm.cpp
 create mode 100644 test/routines/levelx/xconvgemm.hpp

(limited to 'test')

diff --git a/test/correctness/routines/levelx/xconvgemm.cpp b/test/correctness/routines/levelx/xconvgemm.cpp
new file mode 100644
index 00000000..77a0f543
--- /dev/null
+++ b/test/correctness/routines/levelx/xconvgemm.cpp
@@ -0,0 +1,26 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+//   Cedric Nugteren <www.cedricnugteren.nl>
+//
+// =================================================================================================
+
+#include "test/correctness/testblas.hpp"
+#include "test/routines/levelx/xconvgemm.hpp"
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+  auto errors = size_t{0};
+  errors += clblast::RunTests<clblast::TestXconvgemm<float>, float, float>(argc, argv, false, "SCONVGEMM");
+  errors += clblast::RunTests<clblast::TestXconvgemm<double>, double, double>(argc, argv, true, "DCONVGEMM");
+  errors += clblast::RunTests<clblast::TestXconvgemm<clblast::float2>, clblast::float2, clblast::float2>(argc, argv, true, "CCONVGEMM");
+  errors += clblast::RunTests<clblast::TestXconvgemm<clblast::double2>, clblast::double2, clblast::double2>(argc, argv, true, "ZCONVGEMM");
+  errors += clblast::RunTests<clblast::TestXconvgemm<clblast::half>, clblast::half, clblast::half>(argc, argv, true, "HCONVGEMM");
+  if (errors > 0) { return 1; } else { return 0; }
+}
+
+// =================================================================================================
diff --git a/test/correctness/testblas.hpp b/test/correctness/testblas.hpp
index 54b2d6f8..1d1d2ca9 100644
--- a/test/correctness/testblas.hpp
+++ b/test/correctness/testblas.hpp
@@ -60,6 +60,7 @@ class TestBlas: public Tester<T,U> {
   static const std::vector<size_t> kDilationSizes;
   static const std::vector<size_t> kKernelSizes;
   static const std::vector<size_t> kBatchCounts;
+  static const std::vector<size_t> kNumKernels;
   const std::vector<size_t> kOffsets;
   const std::vector<U> kAlphaValues;
   const std::vector<U> kBetaValues;
@@ -136,6 +137,7 @@ template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kBatc
 template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kPadSizes = { 0, 1 };
 template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kDilationSizes = { 1, 2 };
 template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kKernelSizes = { 1, 3 };
+template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kNumKernels = { 1, 2 };
 
 // Test settings for the invalid tests
 template <typename T, typename U> const std::vector<size_t> TestBlas<T,U>::kInvalidIncrements = { 0, 1 };
@@ -241,6 +243,7 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na
   auto dilation_hs = std::vector<size_t>{args.dilation_h};
   auto dilation_ws = std::vector<size_t>{args.dilation_w};
   auto batch_counts = std::vector<size_t>{args.batch_count};
+  auto num_kernelss = std::vector<size_t>{args.num_kernels};
   auto x_sizes = std::vector<size_t>{args.x_size};
   auto y_sizes = std::vector<size_t>{args.y_size};
   auto a_sizes = std::vector<size_t>{args.a_size};
@@ -296,6 +299,7 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na
       if (option == kArgDilationH) { dilation_hs = tester.kDilationSizes; }
       if (option == kArgDilationW) { dilation_ws = tester.kDilationSizes; }
       if (option == kArgBatchCount) { batch_counts = tester.kBatchCounts; }
+      if (option == kArgNumKernels) { num_kernelss = tester.kNumKernels; }
 
       if (option == kArgXOffset) { x_sizes = tester.kVecSizes; }
       if (option == kArgYOffset) { y_sizes = tester.kVecSizes; }
@@ -350,8 +354,10 @@ size_t RunTests(int argc, char *argv[], const bool silent, const std::string &na
         for (auto &dilation_h: dilation_hs) { r_args.dilation_h = dilation_h;
         for (auto &dilation_w: dilation_ws) { r_args.dilation_w = dilation_w;
         for (auto &batch_count: batch_counts) { r_args.batch_count = batch_count;
-          C::SetSizes(r_args, tester.queue_);
-          regular_test_vector.push_back(r_args);
+          for (auto &num_kernels: num_kernelss) { r_args.num_kernels = num_kernels;
+            C::SetSizes(r_args, tester.queue_);
+            regular_test_vector.push_back(r_args);
+          }
         }
         }
         }
diff --git a/test/performance/routines/levelx/xconvgemm.cpp b/test/performance/routines/levelx/xconvgemm.cpp
new file mode 100644
index 00000000..e6bbe8e8
--- /dev/null
+++ b/test/performance/routines/levelx/xconvgemm.cpp
@@ -0,0 +1,33 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+//   Cedric Nugteren <www.cedricnugteren.nl>
+//
+// =================================================================================================
+
+#include "test/performance/client.hpp"
+#include "test/routines/levelx/xconvgemm.hpp"
+
+// Main function (not within the clblast namespace)
+int main(int argc, char *argv[]) {
+  const auto command_line_args = clblast::RetrieveCommandLineArguments(argc, argv);
+  switch(clblast::GetPrecision(command_line_args, clblast::Precision::kSingle)) {
+    case clblast::Precision::kHalf:
+      clblast::RunClient<clblast::TestXconvgemm<clblast::half>, clblast::half, clblast::half>(argc, argv); break;
+    case clblast::Precision::kSingle:
+      clblast::RunClient<clblast::TestXconvgemm<float>, float, float>(argc, argv); break;
+    case clblast::Precision::kDouble:
+      clblast::RunClient<clblast::TestXconvgemm<double>, double, double>(argc, argv); break;
+    case clblast::Precision::kComplexSingle:
+      clblast::RunClient<clblast::TestXconvgemm<clblast::float2>, clblast::float2, clblast::float2>(argc, argv); break;
+    case clblast::Precision::kComplexDouble:
+      clblast::RunClient<clblast::TestXconvgemm<clblast::double2>, clblast::double2, clblast::double2>(argc, argv); break;
+  }
+  return 0;
+}
+
+// =================================================================================================
diff --git a/test/routines/levelx/xconvgemm.hpp b/test/routines/levelx/xconvgemm.hpp
new file mode 100644
index 00000000..6ca5965b
--- /dev/null
+++ b/test/routines/levelx/xconvgemm.hpp
@@ -0,0 +1,219 @@
+
+// =================================================================================================
+// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
+// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
+// width of 100 characters per line.
+//
+// Author(s):
+//   Cedric Nugteren <www.cedricnugteren.nl>
+//
+// This file implements a class with static methods to describe the Xconvgemm routine. Examples of
+// such 'descriptions' are how to calculate the size a of buffer or how to run the routine. These
+// static methods are used by the correctness tester and the performance tester.
+//
+// =================================================================================================
+
+#ifndef CLBLAST_TEST_ROUTINES_XCONVGEMM_H_
+#define CLBLAST_TEST_ROUTINES_XCONVGEMM_H_
+
+#include "test/routines/common.hpp"
+
+namespace clblast {
+// =================================================================================================
+
+// See comment at top of file for a description of the class
+template <typename T>
+class TestXconvgemm {
+public:
+
+  // The BLAS level: 4 for the extra routines
+  static size_t BLASLevel() { return 4; }
+
+  // The list of arguments relevant for this routine
+  static std::vector<std::string> GetOptions() {
+    return {kArgChannels, kArgHeight, kArgWidth, kArgKernelH, kArgKernelW, kArgPadH, kArgPadW,
+            kArgStrideH, kArgStrideW, kArgDilationH, kArgDilationW, kArgNumKernels, kArgBatchCount,
+            kArgAOffset, kArgBOffset, kArgCOffset};
+  }
+  static std::vector<std::string> BuffersIn() { return {kBufMatA, kBufMatB, kBufMatC}; }
+  static std::vector<std::string> BuffersOut() { return {kBufMatC}; }
+
+  // Describes how to obtain the sizes of the buffers
+  static size_t OutputHeight(const Arguments<T> &args) {
+    const auto size = args.height + 2 * args.pad_h;
+    const auto padding = args.dilation_h * (args.kernel_h - 1) + 1;
+    if (size >= padding) { return (size - padding) / args.stride_h + 1; }
+    return 1;
+  }
+  static size_t OutputWidth(const Arguments<T> &args) {
+    const auto size = args.width + 2 * args.pad_w;
+    const auto padding = args.dilation_w * (args.kernel_w - 1) + 1;
+    if (size >= padding) { return (size - padding) / args.stride_w + 1; }
+    return 1;
+  }
+  static size_t NumPatches(const Arguments<T> &args) {
+    return OutputHeight(args) * OutputWidth(args) * args.channels;
+  }
+  static size_t GetSizeA(const Arguments<T> &args) { // 4D: NCHW == batch-channel-height-width
+    return args.batch_count * args.channels * args.height * args.width + args.a_offset;
+  }
+  static size_t GetSizeB(const Arguments<T> &args) { // 4D: KCHW == kernel-channel-height-width
+    return args.num_kernels * args.channels * args.kernel_h * args.kernel_w + args.b_offset;
+  }
+  static size_t GetSizeC(const Arguments<T> &args) { // 4D: NCHW == batch-channel-height-width
+    return args.batch_count * args.num_kernels * OutputHeight(args) * OutputWidth(args) + args.c_offset;
+  }
+
+  // Describes how to set the sizes of all the buffers
+  static void SetSizes(Arguments<T> &args, Queue&) {
+    args.a_size = GetSizeA(args);
+    args.b_size = GetSizeB(args);
+    args.c_size = GetSizeC(args);
+  }
+
+  // Describes what the default values of the leading dimensions of the matrices are
+  static size_t DefaultLDA(const Arguments<T> &) { return 1; } // N/A for this routine
+  static size_t DefaultLDB(const Arguments<T> &) { return 1; } // N/A for this routine
+  static size_t DefaultLDC(const Arguments<T> &) { return 1; } // N/A for this routine
+
+  // Describes which transpose options are relevant for this routine
+  using Transposes = std::vector<Transpose>;
+  static Transposes GetATransposes(const Transposes &) { return {}; } // N/A for this routine
+  static Transposes GetBTransposes(const Transposes &) { return {}; } // N/A for this routine
+
+  // Describes how to prepare the input data
+  static void PrepareData(const Arguments<T>&, Queue&, const int, std::vector<T>&,
+                          std::vector<T>&, std::vector<T>&, std::vector<T>&, std::vector<T>&,
+                          std::vector<T>&, std::vector<T>&) {} // N/A for this routine
+
+  // Describes how to run the CLBlast routine
+  static StatusCode RunRoutine(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
+    #ifdef OPENCL_API
+      auto queue_plain = queue();
+      auto event = cl_event{};
+      auto status = Convgemm<T>(args.channels, args.height, args.width,
+                                args.kernel_h, args.kernel_w,
+                                args.pad_h, args.pad_w,
+                                args.stride_h, args.stride_w,
+                                args.dilation_h, args.dilation_w,
+                                args.num_kernels, args.batch_count,
+                                buffers.a_mat(), args.a_offset,
+                                buffers.b_mat(), args.b_offset,
+                                buffers.c_mat(), args.c_offset,
+                                &queue_plain, &event);
+      if (status == StatusCode::kSuccess) { clWaitForEvents(1, &event); clReleaseEvent(event); }
+    #elif CUDA_API
+      auto status = Convgemm<T>(args.channels, args.height, args.width,
+                                args.kernel_h, args.kernel_w,
+                                args.pad_h, args.pad_w,
+                                args.stride_h, args.stride_w,
+                                args.dilation_h, args.dilation_w,
+                                args.num_kernels, args.batch_count,
+                                buffers.a_mat(), args.a_offset,
+                                buffers.b_mat(), args.b_offset,
+                                buffers.c_mat(), args.c_offset,
+                                queue.GetContext()(), queue.GetDevice()());
+      cuStreamSynchronize(queue());
+    #endif
+    return status;
+  }
+
+  // Describes how to run a naive version of the routine (for correctness/performance comparison).
+  // Note that a proper clBLAS or CPU BLAS comparison is not available for non-BLAS routines.
+  static StatusCode RunReference1(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
+    auto buffers_host = BuffersHost<T>();
+    DeviceToHost(args, buffers, buffers_host, queue, BuffersIn());
+    const auto status = RunReference(args, buffers_host);
+    HostToDevice(args, buffers, buffers_host, queue, BuffersOut());
+    return status;
+  }
+
+  static StatusCode RunReference2(const Arguments<T> &args, BuffersHost<T> &buffers_host, Queue&) {
+    return RunReference(args, buffers_host);
+  }
+  static StatusCode RunReference3(const Arguments<T> &, BuffersCUDA<T> &, Queue &) {
+    return StatusCode::kUnknownError;
+  }
+
+  // Describes how to download the results of the computation (more importantly: which buffer)
+  static std::vector<T> DownloadResult(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
+    std::vector<T> result(args.c_size, static_cast<T>(0));
+    buffers.c_mat.Read(queue, args.c_size, result);
+    return result;
+  }
+
+  // Describes how to compute the indices of the result buffer
+  static size_t ResultID1(const Arguments<T> &args) { return OutputHeight(args) * OutputWidth(args); }
+  static size_t ResultID2(const Arguments<T> &args) { return args.num_kernels * args.batch_count; }
+  static size_t GetResultIndex(const Arguments<T> &args, const size_t id1, const size_t id2) {
+    return id1 + OutputHeight(args) * OutputWidth(args) * id2 + args.c_offset;
+  }
+
+  // Describes how to compute performance metrics
+  static size_t GetFlops(const Arguments<T> &args) {
+    return args.batch_count; // TODO
+  }
+  static size_t GetBytes(const Arguments<T> &args) {
+    return (GetSizeA(args) + GetSizeB(args) + GetSizeC(args)) * sizeof(T);
+  }
+};
+
+// =================================================================================================
+
+template <typename T>
+StatusCode RunReference(const Arguments<T> &args, BuffersHost<T> &buffers_host) {
+  const auto output_h = TestXconvgemm<T>::OutputHeight(args);
+  const auto output_w = TestXconvgemm<T>::OutputWidth(args);
+  for (auto batch_id = size_t{0}; batch_id < args.batch_count; ++batch_id) {
+    for (auto co_id = size_t{0}; co_id < args.num_kernels; ++co_id) { // output channels == num-kernels
+      for (auto ho_id = size_t{0}; ho_id < output_h; ++ho_id) { // image height
+        for (auto wo_id = size_t{0}; wo_id < output_w; ++wo_id) { // image width
+          auto result = ConstantZero<T>();
+
+          // 3D convolution
+          for (auto ci_id = size_t{0}; ci_id < args.channels; ++ci_id) { // input channels
+            for (auto kh_id = size_t{0}; kh_id < args.kernel_h; ++kh_id) { // kernel height
+              for (auto kw_id = size_t{0}; kw_id < args.kernel_w; ++kw_id) { // kernel width
+
+                // Retrieves the value from the input image
+                const auto hi_id = kh_id * args.dilation_h + args.stride_h * ho_id - args.pad_h;
+                const auto wi_id = kw_id * args.dilation_w + args.stride_w * wo_id - args.pad_w;
+                if (hi_id >= 0 && hi_id < args.height &&
+                    wi_id >= 0 && wi_id < args.width) {
+                  const auto input_index = wi_id + args.width * (
+                                           hi_id + args.height * (
+                                           ci_id + args.channels * (
+                                           batch_id)));
+                  const auto input_value = buffers_host.a_mat[input_index + args.a_offset];
+
+                  // Multiplies with the kernel tensor
+                  const auto kernel_index = kw_id + args.kernel_w * (
+                                            kh_id + args.kernel_h * (
+                                            ci_id + args.channels * (
+                                            co_id)));
+                  const auto kernel_value = buffers_host.b_mat[kernel_index + args.b_offset];
+                  result += input_value * kernel_value;
+
+                }
+              }
+            }
+          }
+
+          // Sets the output value (NCHW == batch-channel-height-width)
+          const auto output_index = wo_id + output_w * (
+                                    ho_id + output_h * (
+                                    co_id + args.num_kernels * (
+                                    batch_id)));
+          buffers_host.c_mat[output_index + args.c_offset] = result;
+        }
+      }
+    }
+  }
+  return StatusCode::kSuccess;
+}
+
+// =================================================================================================
+} // namespace clblast
+
+// CLBLAST_TEST_ROUTINES_XCONVGEMM_H_
+#endif
-- 
cgit v1.2.3