diff options
author | Cedric Nugteren <web@cedricnugteren.nl> | 2017-04-13 21:31:27 +0200 |
---|---|---|
committer | Cedric Nugteren <web@cedricnugteren.nl> | 2017-04-13 21:31:27 +0200 |
commit | f7f8ec644f51d16f888b6a7086009b79c0beef8f (patch) | |
tree | 88f652bba2a980b44010f415ed5d48af15d0b063 /test/wrapper_cuda.hpp | |
parent | f24c142948fc71d8b37826c1275259668fe0d0e5 (diff) |
Fixed CUDA malloc and cuBLAS handles: cuBLAS as a performance-reference now works
Diffstat (limited to 'test/wrapper_cuda.hpp')
-rw-r--r-- | test/wrapper_cuda.hpp | 96 |
1 file changed, 67 insertions, 29 deletions
diff --git a/test/wrapper_cuda.hpp b/test/wrapper_cuda.hpp index 509de9d1..51f897c4 100644 --- a/test/wrapper_cuda.hpp +++ b/test/wrapper_cuda.hpp @@ -29,17 +29,47 @@ namespace clblast { // ================================================================================================= +#ifdef CLBLAST_REF_CUBLAS + template <typename T> + void cublasSetup(Arguments<T> &args) { + cudaSetDevice(static_cast<int>(args.device_id)); + auto status = cublasCreate(reinterpret_cast<cublasHandle_t*>(&args.cublas_handle)); + if (status != CUBLAS_STATUS_SUCCESS) { + throw std::runtime_error("CUDA cublasCreate error"); + } + } +#endif + +#ifdef CLBLAST_REF_CUBLAS + template <typename T> + void cublasTeardown(Arguments<T> &args) { + auto status = cublasDestroy(reinterpret_cast<cublasHandle_t>(args.cublas_handle)); + if (status != CUBLAS_STATUS_SUCCESS) { + throw std::runtime_error("CUDA cublasDestroy error"); + } + } +#endif + +// ================================================================================================= + // Copies data from the CUDA device to the host and frees-up the CUDA memory afterwards #ifdef CLBLAST_REF_CUBLAS template <typename T> - void CUDAToHost(T* buffer_cuda, std::vector<T> &buffer_host, const size_t size) { - cudaMemcpy( + void CUDAToHost(T** buffer_cuda, std::vector<T> &buffer_host, const size_t size) { + auto status1 = cudaMemcpy( reinterpret_cast<void*>(buffer_host.data()), - reinterpret_cast<void*>(buffer_cuda), + reinterpret_cast<void*>(*buffer_cuda), size*sizeof(T), cudaMemcpyDeviceToHost ); - cudaFree(buffer_cuda); + if (status1 != cudaSuccess) { + throw std::runtime_error("CUDA cudaMemcpy error with status: "+ToString(static_cast<int>(status1))); + } + auto status2 = cudaFree(*buffer_cuda); + if (status2 != cudaSuccess) { + throw std::runtime_error("CUDA cudaFree error with status: "+ToString(static_cast<int>(status2))); + } + *buffer_cuda = nullptr; } #else template <typename T> void CUDAToHost(T*, const std::vector<T>&, const 
size_t) { } @@ -48,14 +78,22 @@ namespace clblast { // Allocates space on the CUDA device and copies in data from the host #ifdef CLBLAST_REF_CUBLAS template <typename T> - void HostToCUDA(T* buffer_cuda, std::vector<T> &buffer_host, const size_t size) { - cudaMalloc(reinterpret_cast<void**>(&buffer_cuda), size*sizeof(T)); - cudaMemcpy( - reinterpret_cast<void*>(buffer_cuda), + void HostToCUDA(T** buffer_cuda, std::vector<T> &buffer_host, const size_t size) { + if (*buffer_cuda == nullptr) { + auto status1 = cudaMalloc(reinterpret_cast<void**>(buffer_cuda), size*sizeof(T)); + if (status1 != cudaSuccess) { + throw std::runtime_error("CUDA cudaMalloc error with status: "+ToString(static_cast<int>(status1))); + } + } + auto status2 = cudaMemcpy( + reinterpret_cast<void*>(*buffer_cuda), reinterpret_cast<void*>(buffer_host.data()), size*sizeof(T), cudaMemcpyHostToDevice ); + if (status2 != cudaSuccess) { + throw std::runtime_error("CUDA cudaMemcpy error with status: "+ToString(static_cast<int>(status2))); + } } #else template <typename T> void HostToCUDA(T*, const std::vector<T>&, const size_t) { } @@ -65,26 +103,26 @@ namespace clblast { template <typename T> struct BuffersCUDA { - T* x_vec; - T* y_vec; - T* a_mat; - T* b_mat; - T* c_mat; - T* ap_mat; - T* scalar; + T* x_vec = nullptr; + T* y_vec = nullptr; + T* a_mat = nullptr; + T* b_mat = nullptr; + T* c_mat = nullptr; + T* ap_mat = nullptr; + T* scalar = nullptr; }; template <typename T, typename U> void CUDAToHost(const Arguments<U> &args, BuffersCUDA<T> &buffers, BuffersHost<T> &buffers_host, const std::vector<std::string> &names) { for (auto &name: names) { - if (name == kBufVecX) { buffers_host.x_vec = std::vector<T>(args.x_size, static_cast<T>(0)); CUDAToHost(buffers.x_vec, buffers_host.x_vec, args.x_size); } - else if (name == kBufVecY) { buffers_host.y_vec = std::vector<T>(args.y_size, static_cast<T>(0)); CUDAToHost(buffers.y_vec, buffers_host.y_vec, args.y_size); } - else if (name == kBufMatA) { 
buffers_host.a_mat = std::vector<T>(args.a_size, static_cast<T>(0)); CUDAToHost(buffers.a_mat, buffers_host.a_mat, args.a_size); } - else if (name == kBufMatB) { buffers_host.b_mat = std::vector<T>(args.b_size, static_cast<T>(0)); CUDAToHost(buffers.b_mat, buffers_host.b_mat, args.b_size); } - else if (name == kBufMatC) { buffers_host.c_mat = std::vector<T>(args.c_size, static_cast<T>(0)); CUDAToHost(buffers.c_mat, buffers_host.c_mat, args.c_size); } - else if (name == kBufMatAP) { buffers_host.ap_mat = std::vector<T>(args.ap_size, static_cast<T>(0)); CUDAToHost(buffers.ap_mat, buffers_host.ap_mat, args.ap_size); } - else if (name == kBufScalar) { buffers_host.scalar = std::vector<T>(args.scalar_size, static_cast<T>(0)); CUDAToHost(buffers.scalar, buffers_host.scalar, args.scalar_size); } + if (name == kBufVecX) { buffers_host.x_vec = std::vector<T>(args.x_size, static_cast<T>(0)); CUDAToHost(&buffers.x_vec, buffers_host.x_vec, args.x_size); } + else if (name == kBufVecY) { buffers_host.y_vec = std::vector<T>(args.y_size, static_cast<T>(0)); CUDAToHost(&buffers.y_vec, buffers_host.y_vec, args.y_size); } + else if (name == kBufMatA) { buffers_host.a_mat = std::vector<T>(args.a_size, static_cast<T>(0)); CUDAToHost(&buffers.a_mat, buffers_host.a_mat, args.a_size); } + else if (name == kBufMatB) { buffers_host.b_mat = std::vector<T>(args.b_size, static_cast<T>(0)); CUDAToHost(&buffers.b_mat, buffers_host.b_mat, args.b_size); } + else if (name == kBufMatC) { buffers_host.c_mat = std::vector<T>(args.c_size, static_cast<T>(0)); CUDAToHost(&buffers.c_mat, buffers_host.c_mat, args.c_size); } + else if (name == kBufMatAP) { buffers_host.ap_mat = std::vector<T>(args.ap_size, static_cast<T>(0)); CUDAToHost(&buffers.ap_mat, buffers_host.ap_mat, args.ap_size); } + else if (name == kBufScalar) { buffers_host.scalar = std::vector<T>(args.scalar_size, static_cast<T>(0)); CUDAToHost(&buffers.scalar, buffers_host.scalar, args.scalar_size); } else { throw std::runtime_error("Invalid 
buffer name"); } } } @@ -93,13 +131,13 @@ template <typename T, typename U> void HostToCUDA(const Arguments<U> &args, BuffersCUDA<T> &buffers, BuffersHost<T> &buffers_host, const std::vector<std::string> &names) { for (auto &name: names) { - if (name == kBufVecX) { HostToCUDA(buffers.x_vec, buffers_host.x_vec, args.x_size); } - else if (name == kBufVecY) { HostToCUDA(buffers.y_vec, buffers_host.y_vec, args.y_size); } - else if (name == kBufMatA) { HostToCUDA(buffers.a_mat, buffers_host.a_mat, args.a_size); } - else if (name == kBufMatB) { HostToCUDA(buffers.b_mat, buffers_host.b_mat, args.b_size); } - else if (name == kBufMatC) { HostToCUDA(buffers.c_mat, buffers_host.c_mat, args.c_size); } - else if (name == kBufMatAP) { HostToCUDA(buffers.ap_mat, buffers_host.ap_mat, args.ap_size); } - else if (name == kBufScalar) { HostToCUDA(buffers.scalar, buffers_host.scalar, args.scalar_size); } + if (name == kBufVecX) { HostToCUDA(&buffers.x_vec, buffers_host.x_vec, args.x_size); } + else if (name == kBufVecY) { HostToCUDA(&buffers.y_vec, buffers_host.y_vec, args.y_size); } + else if (name == kBufMatA) { HostToCUDA(&buffers.a_mat, buffers_host.a_mat, args.a_size); } + else if (name == kBufMatB) { HostToCUDA(&buffers.b_mat, buffers_host.b_mat, args.b_size); } + else if (name == kBufMatC) { HostToCUDA(&buffers.c_mat, buffers_host.c_mat, args.c_size); } + else if (name == kBufMatAP) { HostToCUDA(&buffers.ap_mat, buffers_host.ap_mat, args.ap_size); } + else if (name == kBufScalar) { HostToCUDA(&buffers.scalar, buffers_host.scalar, args.scalar_size); } else { throw std::runtime_error("Invalid buffer name"); } } } |