path: root/src/kernels/level3/convert_hermitian.opencl
author     Cedric Nugteren <web@cedricnugteren.nl>    2016-09-13 21:14:51 +0200
committer  GitHub <noreply@github.com>                2016-09-13 21:14:51 +0200
commit     f07ac22f5b57d22756d779d2e53620f988d786ee (patch)
tree       e8bcbc331683ca6fd807f5a5b83bb05c6e6fed69 /src/kernels/level3/convert_hermitian.opencl
parent     7c13bacf129291e3e295ecb6e833788477085fa0 (diff)
parent     4b94afda941a86f363064ff02f97e21eb9618794 (diff)
Merge pull request #99 from CNugteren/development
Update to version 0.9.0
Diffstat (limited to 'src/kernels/level3/convert_hermitian.opencl')
-rw-r--r--  src/kernels/level3/convert_hermitian.opencl  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/kernels/level3/convert_hermitian.opencl b/src/kernels/level3/convert_hermitian.opencl
index 53cc161a..ed2ded98 100644
--- a/src/kernels/level3/convert_hermitian.opencl
+++ b/src/kernels/level3/convert_hermitian.opencl
@@ -20,13 +20,13 @@ R"(
// Kernel to populate a squared hermitian matrix, given that the triangle which holds the data is
// stored as the lower-triangle of the input matrix. This uses the padding kernel's parameters.
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void HermLowerToSquared(const int src_dim,
- const int src_ld, const int src_offset,
- __global const real* restrict src,
- const int dest_dim,
- const int dest_ld, const int dest_offset,
- __global real* dest) {
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void HermLowerToSquared(const int src_dim,
+ const int src_ld, const int src_offset,
+ __global const real* restrict src,
+ const int dest_dim,
+ const int dest_ld, const int dest_offset,
+ __global real* dest) {
// Loops over the work per thread in both dimensions
#pragma unroll
@@ -59,13 +59,13 @@ __kernel void HermLowerToSquared(const int src_dim,
}
// Same as above, but now the matrix' data is stored in the upper-triangle
-__attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
-__kernel void HermUpperToSquared(const int src_dim,
- const int src_ld, const int src_offset,
- __global const real* restrict src,
- const int dest_dim,
- const int dest_ld, const int dest_offset,
- __global real* dest) {
+__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
+void HermUpperToSquared(const int src_dim,
+ const int src_ld, const int src_offset,
+ __global const real* restrict src,
+ const int dest_dim,
+ const int dest_ld, const int dest_offset,
+ __global real* dest) {
// Loops over the work per thread in both dimensions
#pragma unroll
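
Both hunks make the same change: the __kernel qualifier is moved in front of the __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1))) attribute, so the work-group size hint follows the qualifier instead of preceding it. Below is a minimal sketch of that declaration pattern, using a hypothetical CopyExample kernel (not part of this diff) and assuming PAD_DIMX, PAD_DIMY, and the real type alias are provided by the surrounding common/padding headers, as in the kernels above:

// Hypothetical illustration of the qualifier-then-attribute ordering introduced
// above: __kernel comes first, then the required work-group size attribute.
__kernel __attribute__((reqd_work_group_size(PAD_DIMX, PAD_DIMY, 1)))
void CopyExample(const int dim, const int ld, const int offset,
                 __global const real* restrict src,
                 __global real* dest) {
  // One work-item per matrix element
  const int id_one = get_global_id(0);
  const int id_two = get_global_id(1);
  // Straight element-wise copy, guarded against out-of-range work-items
  if (id_one < dim && id_two < dim) {
    dest[id_two*ld + id_one + offset] = src[id_two*ld + id_one + offset];
  }
}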