author    CNugteren <web@cedricnugteren.nl>    2015-06-14 11:15:53 +0200
committer CNugteren <web@cedricnugteren.nl>    2015-06-14 11:15:53 +0200
commit    294a3e3d410c87ffcc7fc550e09b6d45c71a0af8 (patch)
tree      d68a45bb8312aabba9589bb1c51b2c6ffe0dc504 /src/kernels
parent    ab0064dab76c83ee9820acb62fa914c493c2563d (diff)
Split the three variations of the GEMV kernel for maximal tuning freedom
Diffstat (limited to 'src/kernels')
-rw-r--r--  src/kernels/xgemv.opencl | 342
1 file changed, 209 insertions(+), 133 deletions(-)
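For reference, the kernels in this file implement the BLAS level-2 GEMV operation. Judging from the index arithmetic in the kernel bodies below, the renamed a_rotated flag plays the role of the usual transpose switch:

\[
y \leftarrow \alpha A x + \beta y \quad (\texttt{a\_rotated} = 0),
\qquad
y \leftarrow \alpha A^{\mathsf{T}} x + \beta y \quad (\texttt{a\_rotated} = 1)
\]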
diff --git a/src/kernels/xgemv.opencl b/src/kernels/xgemv.opencl
index 26e7587f..b1b2fc69 100644
--- a/src/kernels/xgemv.opencl
+++ b/src/kernels/xgemv.opencl
@@ -19,42 +19,63 @@ R"(
// Parameters set by the tuner or by the database. Here they are given a basic default value in case
// this kernel file is used outside of the CLBlast library.
-#ifndef WGS
- #define WGS 64 // The local work-group size
+
+// 1: For the full version of the kernel
+#ifndef WGS1
+ #define WGS1 64 // The local work-group size
+#endif
+#ifndef WPT1
+ #define WPT1 1 // The amount of work-per-thread
+#endif
+
+// 2: For the fast version
+#ifndef WGS2
+ #define WGS2 64 // The local work-group size
+#endif
+#ifndef WPT2
+ #define WPT2 1 // The amount of work-per-thread
+#endif
+#ifndef VW2
+ #define VW2 1 // Vector width of matrix A loads
#endif
-#ifndef WPT
- #define WPT 1 // The amount of work-per-thread
+
+// 3: For the fast rotated version
+#ifndef WGS3
+ #define WGS3 64 // The local work-group size
+#endif
+#ifndef WPT3
+ #define WPT3 1 // The amount of work-per-thread
#endif
-#ifndef VW
- #define VW 1 // Vector width of matrix A loads (only for the fast kernel)
+#ifndef VW3
+ #define VW3 1 // Vector width of matrix A loads
#endif
// =================================================================================================
// Full version of the kernel
-__attribute__((reqd_work_group_size(WGS, 1, 1)))
+__attribute__((reqd_work_group_size(WGS1, 1, 1)))
__kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
- const int a_transposed,
+ const int a_rotated,
const __global real* restrict agm, const int a_offset, const int a_ld,
const __global real* restrict xgm, const int x_offset, const int x_inc,
__global real* ygm, const int y_offset, const int y_inc) {
// Local memory for the vector X
- __local real xlm[WGS];
+ __local real xlm[WGS1];
// Initializes the accumulation register
- real acc[WPT];
+ real acc[WPT1];
#pragma unroll
- for (int w=0; w<WPT; ++w) {
+ for (int w=0; w<WPT1; ++w) {
SetToZero(acc[w]);
}
// Divides the work in a main and tail section
- const int n_tail = n % WGS;
+ const int n_tail = n % WGS1;
const int n_floor = n - n_tail;
// Loops over work-group sized portions of the work
- for (int kwg=0; kwg<n_floor; kwg+=WGS) {
+ for (int kwg=0; kwg<n_floor; kwg+=WGS1) {
// Loads the vector X into local memory
const int lid = get_local_id(0);
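The defaults above only apply when this file is compiled outside of CLBlast; normally the tuner supplies per-device values as compile-time definitions. A minimal host-side sketch of how such overrides reach the kernel through standard OpenCL build options (the -D values are illustrative, not entries from CLBlast's tuning database):

    #include <CL/cl.h>

    /* Builds the GEMV kernel source with tuned parameters passed as OpenCL
     * build options; error handling is reduced to returning the status. */
    cl_int build_gemv(cl_context context, cl_device_id device,
                      const char *source, cl_program *program) {
      cl_int err;
      *program = clCreateProgramWithSource(context, 1, &source, NULL, &err);
      if (err != CL_SUCCESS) { return err; }
      const char *options = "-DWGS1=128 -DWPT1=2 -DWGS2=256 -DWPT2=4 -DVW2=4";
      return clBuildProgram(*program, 1, &device, options, NULL, NULL);
    }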
@@ -65,21 +86,21 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
// Loops over the work per thread, and checks whether in bounds
#pragma unroll
- for (int w=0; w<WPT; ++w) {
+ for (int w=0; w<WPT1; ++w) {
const int gid = w*get_global_size(0) + get_global_id(0);
if (gid < m) {
- // The multiply-add function for the main part (divisible by WGS)
- if (a_transposed == 0) { // Not transposed
+ // The multiply-add function for the main part (divisible by WGS1)
+ if (a_rotated == 0) { // Not rotated
#pragma unroll
- for (int kl=0; kl<WGS; ++kl) {
+ for (int kl=0; kl<WGS1; ++kl) {
const int k = kwg + kl;
MultiplyAdd(acc[w], xlm[kl], agm[gid + a_ld*k + a_offset]);
}
}
else { // Transposed
#pragma unroll
- for (int kl=0; kl<WGS; ++kl) {
+ for (int kl=0; kl<WGS1; ++kl) {
const int k = kwg + kl;
MultiplyAdd(acc[w], xlm[kl], agm[k + a_ld*gid + a_offset]);
}
@@ -93,12 +114,12 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
// Loops over the work per thread, and checks whether in bounds
#pragma unroll
- for (int w=0; w<WPT; ++w) {
+ for (int w=0; w<WPT1; ++w) {
const int gid = w*get_global_size(0) + get_global_id(0);
if (gid < m) {
- // The multiply-add function for the remainder part (not divisible by WGS)
- if (a_transposed == 0) { // Not transposed
+ // The multiply-add function for the remainder part (not divisible by WGS1)
+ if (a_rotated == 0) { // Not rotated
#pragma unroll
for (int k=n_floor; k<n; ++k) {
MultiplyAdd(acc[w], xgm[k*x_inc + x_offset], agm[gid + a_ld*k + a_offset]);
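The main/tail split keeps the local-memory staging of x for the largest WGS1-aligned prefix of the reduction and handles the remainder straight from global memory:

\[
n_{\mathrm{floor}} = \left\lfloor n / \mathtt{WGS1} \right\rfloor \cdot \mathtt{WGS1},
\qquad
n_{\mathrm{tail}} = n \bmod \mathtt{WGS1}
\]

For k in [n_floor, n) the vector element is read directly as xgm[k*x_inc + x_offset], so the partial tile needs no extra barrier.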
@@ -120,40 +141,41 @@ __kernel void Xgemv(const int m, const int n, const real alpha, const real beta,
// =================================================================================================
// Data-widths for the 'fast' kernel
-#if VW == 1
- typedef real realV;
-#elif VW == 2
- typedef real2 realV;
-#elif VW == 4
- typedef real4 realV;
-#elif VW == 8
- typedef real8 realV;
-#elif VW == 16
- typedef real16 realV;
+#if VW2 == 1
+ typedef real realVF;
+#elif VW2 == 2
+ typedef real2 realVF;
+#elif VW2 == 4
+ typedef real4 realVF;
+#elif VW2 == 8
+ typedef real8 realVF;
+#elif VW2 == 16
+ typedef real16 realVF;
#endif
// Faster version of the kernel, assuming that:
-// --> 'm' and 'n' are multiples of WGS
+// --> 'm' and 'n' are multiples of WGS2
// --> 'a_offset' is 0
-// --> 'a_ld' is a multiple of VW
-__attribute__((reqd_work_group_size(WGS, 1, 1)))
+// --> 'a_ld' is a multiple of VW2
+// --> 'a_rotated' is 0
+__attribute__((reqd_work_group_size(WGS2, 1, 1)))
__kernel void XgemvFast(const int m, const int n, const real alpha, const real beta,
- const int a_transposed,
- const __global realV* restrict agm, const int a_offset, const int a_ld,
+ const int a_rotated,
+ const __global realVF* restrict agm, const int a_offset, const int a_ld,
const __global real* restrict xgm, const int x_offset, const int x_inc,
__global real* ygm, const int y_offset, const int y_inc) {
// Local memory for the vector X
- __local real xlm[WGS];
+ __local real xlm[WGS2];
// Initializes the accumulation register
- real acc[WPT];
+ real acc[WPT2];
#pragma unroll
- for (int w=0; w<WPT; ++w) {
+ for (int w=0; w<WPT2; ++w) {
SetToZero(acc[w]);
}
// Loops over work-group sized portions of the work
- for (int kwg=0; kwg<n; kwg+=WGS) {
+ for (int kwg=0; kwg<n; kwg+=WGS2) {
// Loads the vector X into local memory
const int lid = get_local_id(0);
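The precondition comments attached to the two fast variants suggest how a host could pick a kernel; a hypothetical dispatch sketch follows (the helper and its name are not CLBlast's actual selection code):

    /* Returns the name of the kernel variant whose preconditions hold,
     * falling back to the general version. wgs2/vw2/wgs3/vw3 must match
     * the values the kernels were compiled with. */
    const char *select_gemv_kernel(int m, int n, int a_offset, int a_ld,
                                   int a_rotated, int wgs2, int vw2,
                                   int wgs3, int vw3) {
      if (a_rotated == 0 && a_offset == 0 &&
          m % wgs2 == 0 && n % wgs2 == 0 && a_ld % vw2 == 0) {
        return "XgemvFast";
      }
      if (a_rotated == 1 && a_offset == 0 &&
          m % wgs3 == 0 && n % wgs3 == 0 && a_ld % vw3 == 0) {
        return "XgemvFastRot";
      }
      return "Xgemv";  /* the full version handles every case */
    }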
@@ -162,102 +184,156 @@ __kernel void XgemvFast(const int m, const int n, const real alpha, const real b
// Synchronizes all threads in a workgroup
barrier(CLK_LOCAL_MEM_FENCE);
- // The multiply-add function (not transposed)
- if (a_transposed == 0) {
+ // The multiply-add function (not rotated)
+ #pragma unroll
+ for (int kl=0; kl<WGS2; ++kl) {
+ const int k = kwg + kl;
#pragma unroll
- for (int kl=0; kl<WGS; ++kl) {
- const int k = kwg + kl;
- #pragma unroll
- for (int w=0; w<WPT/VW; ++w) {
- const int gid = (WPT/VW)*get_global_id(0) + w;
- #if VW == 1
- MultiplyAdd(acc[VW*w+0], xlm[kl], agm[gid + (a_ld/VW)*k]);
- #elif VW == 2
- MultiplyAdd(acc[VW*w+0], xlm[kl], agm[gid + (a_ld/VW)*k].x);
- MultiplyAdd(acc[VW*w+1], xlm[kl], agm[gid + (a_ld/VW)*k].y);
- #elif VW == 4
- MultiplyAdd(acc[VW*w+0], xlm[kl], agm[gid + (a_ld/VW)*k].x);
- MultiplyAdd(acc[VW*w+1], xlm[kl], agm[gid + (a_ld/VW)*k].y);
- MultiplyAdd(acc[VW*w+2], xlm[kl], agm[gid + (a_ld/VW)*k].z);
- MultiplyAdd(acc[VW*w+3], xlm[kl], agm[gid + (a_ld/VW)*k].w);
- #elif VW == 8
- MultiplyAdd(acc[VW*w+0], xlm[kl], agm[gid + (a_ld/VW)*k].s0);
- MultiplyAdd(acc[VW*w+1], xlm[kl], agm[gid + (a_ld/VW)*k].s1);
- MultiplyAdd(acc[VW*w+2], xlm[kl], agm[gid + (a_ld/VW)*k].s2);
- MultiplyAdd(acc[VW*w+3], xlm[kl], agm[gid + (a_ld/VW)*k].s3);
- MultiplyAdd(acc[VW*w+4], xlm[kl], agm[gid + (a_ld/VW)*k].s4);
- MultiplyAdd(acc[VW*w+5], xlm[kl], agm[gid + (a_ld/VW)*k].s5);
- MultiplyAdd(acc[VW*w+6], xlm[kl], agm[gid + (a_ld/VW)*k].s6);
- MultiplyAdd(acc[VW*w+7], xlm[kl], agm[gid + (a_ld/VW)*k].s7);
- #elif VW == 16
- MultiplyAdd(acc[VW*w+0], xlm[kl], agm[gid + (a_ld/VW)*k].s0);
- MultiplyAdd(acc[VW*w+1], xlm[kl], agm[gid + (a_ld/VW)*k].s1);
- MultiplyAdd(acc[VW*w+2], xlm[kl], agm[gid + (a_ld/VW)*k].s2);
- MultiplyAdd(acc[VW*w+3], xlm[kl], agm[gid + (a_ld/VW)*k].s3);
- MultiplyAdd(acc[VW*w+4], xlm[kl], agm[gid + (a_ld/VW)*k].s4);
- MultiplyAdd(acc[VW*w+5], xlm[kl], agm[gid + (a_ld/VW)*k].s5);
- MultiplyAdd(acc[VW*w+6], xlm[kl], agm[gid + (a_ld/VW)*k].s6);
- MultiplyAdd(acc[VW*w+7], xlm[kl], agm[gid + (a_ld/VW)*k].s7);
- MultiplyAdd(acc[VW*w+8], xlm[kl], agm[gid + (a_ld/VW)*k].s8);
- MultiplyAdd(acc[VW*w+9], xlm[kl], agm[gid + (a_ld/VW)*k].s9);
- MultiplyAdd(acc[VW*w+10], xlm[kl], agm[gid + (a_ld/VW)*k].sA);
- MultiplyAdd(acc[VW*w+11], xlm[kl], agm[gid + (a_ld/VW)*k].sB);
- MultiplyAdd(acc[VW*w+12], xlm[kl], agm[gid + (a_ld/VW)*k].sC);
- MultiplyAdd(acc[VW*w+13], xlm[kl], agm[gid + (a_ld/VW)*k].sD);
- MultiplyAdd(acc[VW*w+14], xlm[kl], agm[gid + (a_ld/VW)*k].sE);
- MultiplyAdd(acc[VW*w+15], xlm[kl], agm[gid + (a_ld/VW)*k].sF);
- #endif
- }
+ for (int w=0; w<WPT2/VW2; ++w) {
+ const int gid = (WPT2/VW2)*get_global_id(0) + w;
+ #if VW2 == 1
+ MultiplyAdd(acc[VW2*w+0], xlm[kl], agm[gid + (a_ld/VW2)*k]);
+ #elif VW2 == 2
+ MultiplyAdd(acc[VW2*w+0], xlm[kl], agm[gid + (a_ld/VW2)*k].x);
+ MultiplyAdd(acc[VW2*w+1], xlm[kl], agm[gid + (a_ld/VW2)*k].y);
+ #elif VW2 == 4
+ MultiplyAdd(acc[VW2*w+0], xlm[kl], agm[gid + (a_ld/VW2)*k].x);
+ MultiplyAdd(acc[VW2*w+1], xlm[kl], agm[gid + (a_ld/VW2)*k].y);
+ MultiplyAdd(acc[VW2*w+2], xlm[kl], agm[gid + (a_ld/VW2)*k].z);
+ MultiplyAdd(acc[VW2*w+3], xlm[kl], agm[gid + (a_ld/VW2)*k].w);
+ #elif VW2 == 8
+ MultiplyAdd(acc[VW2*w+0], xlm[kl], agm[gid + (a_ld/VW2)*k].s0);
+ MultiplyAdd(acc[VW2*w+1], xlm[kl], agm[gid + (a_ld/VW2)*k].s1);
+ MultiplyAdd(acc[VW2*w+2], xlm[kl], agm[gid + (a_ld/VW2)*k].s2);
+ MultiplyAdd(acc[VW2*w+3], xlm[kl], agm[gid + (a_ld/VW2)*k].s3);
+ MultiplyAdd(acc[VW2*w+4], xlm[kl], agm[gid + (a_ld/VW2)*k].s4);
+ MultiplyAdd(acc[VW2*w+5], xlm[kl], agm[gid + (a_ld/VW2)*k].s5);
+ MultiplyAdd(acc[VW2*w+6], xlm[kl], agm[gid + (a_ld/VW2)*k].s6);
+ MultiplyAdd(acc[VW2*w+7], xlm[kl], agm[gid + (a_ld/VW2)*k].s7);
+ #elif VW2 == 16
+ MultiplyAdd(acc[VW2*w+0], xlm[kl], agm[gid + (a_ld/VW2)*k].s0);
+ MultiplyAdd(acc[VW2*w+1], xlm[kl], agm[gid + (a_ld/VW2)*k].s1);
+ MultiplyAdd(acc[VW2*w+2], xlm[kl], agm[gid + (a_ld/VW2)*k].s2);
+ MultiplyAdd(acc[VW2*w+3], xlm[kl], agm[gid + (a_ld/VW2)*k].s3);
+ MultiplyAdd(acc[VW2*w+4], xlm[kl], agm[gid + (a_ld/VW2)*k].s4);
+ MultiplyAdd(acc[VW2*w+5], xlm[kl], agm[gid + (a_ld/VW2)*k].s5);
+ MultiplyAdd(acc[VW2*w+6], xlm[kl], agm[gid + (a_ld/VW2)*k].s6);
+ MultiplyAdd(acc[VW2*w+7], xlm[kl], agm[gid + (a_ld/VW2)*k].s7);
+ MultiplyAdd(acc[VW2*w+8], xlm[kl], agm[gid + (a_ld/VW2)*k].s8);
+ MultiplyAdd(acc[VW2*w+9], xlm[kl], agm[gid + (a_ld/VW2)*k].s9);
+ MultiplyAdd(acc[VW2*w+10], xlm[kl], agm[gid + (a_ld/VW2)*k].sA);
+ MultiplyAdd(acc[VW2*w+11], xlm[kl], agm[gid + (a_ld/VW2)*k].sB);
+ MultiplyAdd(acc[VW2*w+12], xlm[kl], agm[gid + (a_ld/VW2)*k].sC);
+ MultiplyAdd(acc[VW2*w+13], xlm[kl], agm[gid + (a_ld/VW2)*k].sD);
+ MultiplyAdd(acc[VW2*w+14], xlm[kl], agm[gid + (a_ld/VW2)*k].sE);
+ MultiplyAdd(acc[VW2*w+15], xlm[kl], agm[gid + (a_ld/VW2)*k].sF);
+ #endif
}
}
- // The multiply-add function (transposed)
- else {
+ // Synchronizes all threads in a workgroup
+ barrier(CLK_LOCAL_MEM_FENCE);
+ }
+
+ // Stores the final result
+ #pragma unroll
+ for (int w=0; w<WPT2; ++w) {
+ const int gid = WPT2*get_global_id(0) + w;
+ AXPBY(ygm[gid*y_inc + y_offset], alpha, acc[w], beta, ygm[gid*y_inc + y_offset]);
+ }
+}
+
+// =================================================================================================
+
+// Data-widths for the 'fast' kernel with rotated matrix
+#if VW3 == 1
+ typedef real realVFR;
+#elif VW3 == 2
+ typedef real2 realVFR;
+#elif VW3 == 4
+ typedef real4 realVFR;
+#elif VW3 == 8
+ typedef real8 realVFR;
+#elif VW3 == 16
+ typedef real16 realVFR;
+#endif
+
+// Faster version of the kernel, assuming that:
+// --> 'm' and 'n' are multiples of WGS3
+// --> 'a_offset' is 0
+// --> 'a_ld' is a multiple of VW3
+// --> 'a_rotated' is 1
+__attribute__((reqd_work_group_size(WGS3, 1, 1)))
+__kernel void XgemvFastRot(const int m, const int n, const real alpha, const real beta,
+ const int a_rotated,
+ const __global realVFR* restrict agm, const int a_offset, const int a_ld,
+ const __global real* restrict xgm, const int x_offset, const int x_inc,
+ __global real* ygm, const int y_offset, const int y_inc) {
+ // Local memory for the vector X
+ __local real xlm[WGS3];
+
+ // Initializes the accumulation register
+ real acc[WPT3];
+ #pragma unroll
+ for (int w=0; w<WPT3; ++w) {
+ SetToZero(acc[w]);
+ }
+
+ // Loops over work-group sized portions of the work
+ for (int kwg=0; kwg<n; kwg+=WGS3) {
+
+ // Loads the vector X into local memory
+ const int lid = get_local_id(0);
+ xlm[lid] = xgm[(kwg + lid)*x_inc + x_offset];
+
+ // Synchronizes all threads in a workgroup
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ // The multiply-add function (rotated)
+ #pragma unroll
+ for (int kl=0; kl<WGS3/VW3; ++kl) {
+ const int k = (kwg/VW3) + kl;
#pragma unroll
- for (int kl=0; kl<WGS/VW; ++kl) {
- const int k = (kwg/VW) + kl;
- #pragma unroll
- for (int w=0; w<WPT; ++w) {
- const int gid = WPT*get_global_id(0) + w;
- realV avec = agm[k + (a_ld/VW)*gid];
- #if VW == 1
- MultiplyAdd(acc[w], xlm[VW*kl+0], avec);
- #elif VW == 2
- MultiplyAdd(acc[w], xlm[VW*kl+0], avec.x);
- MultiplyAdd(acc[w], xlm[VW*kl+1], avec.y);
- #elif VW == 4
- MultiplyAdd(acc[w], xlm[VW*kl+0], avec.x);
- MultiplyAdd(acc[w], xlm[VW*kl+1], avec.y);
- MultiplyAdd(acc[w], xlm[VW*kl+2], avec.z);
- MultiplyAdd(acc[w], xlm[VW*kl+3], avec.w);
- #elif VW == 8
- MultiplyAdd(acc[w], xlm[VW*kl+0], avec.s0);
- MultiplyAdd(acc[w], xlm[VW*kl+1], avec.s1);
- MultiplyAdd(acc[w], xlm[VW*kl+2], avec.s2);
- MultiplyAdd(acc[w], xlm[VW*kl+3], avec.s3);
- MultiplyAdd(acc[w], xlm[VW*kl+4], avec.s4);
- MultiplyAdd(acc[w], xlm[VW*kl+5], avec.s5);
- MultiplyAdd(acc[w], xlm[VW*kl+6], avec.s6);
- MultiplyAdd(acc[w], xlm[VW*kl+7], avec.s7);
- #elif VW == 16
- MultiplyAdd(acc[w], xlm[VW*kl+0], avec.s0);
- MultiplyAdd(acc[w], xlm[VW*kl+1], avec.s1);
- MultiplyAdd(acc[w], xlm[VW*kl+2], avec.s2);
- MultiplyAdd(acc[w], xlm[VW*kl+3], avec.s3);
- MultiplyAdd(acc[w], xlm[VW*kl+4], avec.s4);
- MultiplyAdd(acc[w], xlm[VW*kl+5], avec.s5);
- MultiplyAdd(acc[w], xlm[VW*kl+6], avec.s6);
- MultiplyAdd(acc[w], xlm[VW*kl+7], avec.s7);
- MultiplyAdd(acc[w], xlm[VW*kl+8], avec.s8);
- MultiplyAdd(acc[w], xlm[VW*kl+9], avec.s9);
- MultiplyAdd(acc[w], xlm[VW*kl+10], avec.sA);
- MultiplyAdd(acc[w], xlm[VW*kl+11], avec.sB);
- MultiplyAdd(acc[w], xlm[VW*kl+12], avec.sC);
- MultiplyAdd(acc[w], xlm[VW*kl+13], avec.sD);
- MultiplyAdd(acc[w], xlm[VW*kl+14], avec.sE);
- MultiplyAdd(acc[w], xlm[VW*kl+15], avec.sF);
- #endif
- }
+ for (int w=0; w<WPT3; ++w) {
+ const int gid = WPT3*get_global_id(0) + w;
+ realVFR avec = agm[k + (a_ld/VW3)*gid];
+ #if VW3 == 1
+ MultiplyAdd(acc[w], xlm[VW3*kl+0], avec);
+ #elif VW3 == 2
+ MultiplyAdd(acc[w], xlm[VW3*kl+0], avec.x);
+ MultiplyAdd(acc[w], xlm[VW3*kl+1], avec.y);
+ #elif VW3 == 4
+ MultiplyAdd(acc[w], xlm[VW3*kl+0], avec.x);
+ MultiplyAdd(acc[w], xlm[VW3*kl+1], avec.y);
+ MultiplyAdd(acc[w], xlm[VW3*kl+2], avec.z);
+ MultiplyAdd(acc[w], xlm[VW3*kl+3], avec.w);
+ #elif VW3 == 8
+ MultiplyAdd(acc[w], xlm[VW3*kl+0], avec.s0);
+ MultiplyAdd(acc[w], xlm[VW3*kl+1], avec.s1);
+ MultiplyAdd(acc[w], xlm[VW3*kl+2], avec.s2);
+ MultiplyAdd(acc[w], xlm[VW3*kl+3], avec.s3);
+ MultiplyAdd(acc[w], xlm[VW3*kl+4], avec.s4);
+ MultiplyAdd(acc[w], xlm[VW3*kl+5], avec.s5);
+ MultiplyAdd(acc[w], xlm[VW3*kl+6], avec.s6);
+ MultiplyAdd(acc[w], xlm[VW3*kl+7], avec.s7);
+ #elif VW3 == 16
+ MultiplyAdd(acc[w], xlm[VW3*kl+0], avec.s0);
+ MultiplyAdd(acc[w], xlm[VW3*kl+1], avec.s1);
+ MultiplyAdd(acc[w], xlm[VW3*kl+2], avec.s2);
+ MultiplyAdd(acc[w], xlm[VW3*kl+3], avec.s3);
+ MultiplyAdd(acc[w], xlm[VW3*kl+4], avec.s4);
+ MultiplyAdd(acc[w], xlm[VW3*kl+5], avec.s5);
+ MultiplyAdd(acc[w], xlm[VW3*kl+6], avec.s6);
+ MultiplyAdd(acc[w], xlm[VW3*kl+7], avec.s7);
+ MultiplyAdd(acc[w], xlm[VW3*kl+8], avec.s8);
+ MultiplyAdd(acc[w], xlm[VW3*kl+9], avec.s9);
+ MultiplyAdd(acc[w], xlm[VW3*kl+10], avec.sA);
+ MultiplyAdd(acc[w], xlm[VW3*kl+11], avec.sB);
+ MultiplyAdd(acc[w], xlm[VW3*kl+12], avec.sC);
+ MultiplyAdd(acc[w], xlm[VW3*kl+13], avec.sD);
+ MultiplyAdd(acc[w], xlm[VW3*kl+14], avec.sE);
+ MultiplyAdd(acc[w], xlm[VW3*kl+15], avec.sF);
+ #endif
}
}
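Since each work-item accumulates WPT3 rows (gid = WPT3*get_global_id(0) + w), the launch needs m/WPT3 work-items in total. A sketch of the enqueue under that assumption (names and error handling simplified; wgs3/wpt3 must match the build options):

    #include <CL/cl.h>

    /* Enqueues XgemvFastRot with one work-item per wpt3 output rows.
     * Assumes m/wpt3 is a multiple of wgs3, as reqd_work_group_size demands. */
    cl_int enqueue_gemv_fast_rot(cl_command_queue queue, cl_kernel kernel,
                                 int m, int wgs3, int wpt3) {
      size_t local = (size_t)wgs3;
      size_t global = (size_t)(m / wpt3);  /* each work-item: wpt3 rows */
      return clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global, &local,
                                    0, NULL, NULL);
    }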
@@ -267,8 +343,8 @@ __kernel void XgemvFast(const int m, const int n, const real alpha, const real b
// Stores the final result
#pragma unroll
- for (int w=0; w<WPT; ++w) {
- const int gid = WPT*get_global_id(0) + w;
+ for (int w=0; w<WPT3; ++w) {
+ const int gid = WPT3*get_global_id(0) + w;
AXPBY(ygm[gid*y_inc + y_offset], alpha, acc[w], beta, ygm[gid*y_inc + y_offset]);
}
}