author     Cedric Nugteren <web@cedricnugteren.nl>   2017-12-03 16:40:36 +0100
committer  Cedric Nugteren <web@cedricnugteren.nl>   2017-12-03 16:40:36 +0100
commit     cf4555d1f44aea9c82b60211b5650b6b77a1226c (patch)
tree       459676fcf89b85aaab7b014d935d1f5b3ab984fc
parent     0a1a3de58a410f61f3b990537541a633826ea640 (diff)
Added GEMM (direct and indirect) to the pre-processor testing; modified the loops in the kernels accordingly
-rw-r--r--  src/kernels/level1/xdot.opencl                  2
-rw-r--r--  src/kernels/level3/xgemm_direct_part1.opencl   90
-rw-r--r--  src/kernels/level3/xgemm_direct_part2.opencl   53
-rw-r--r--  src/kernels/level3/xgemm_direct_part3.opencl   14
-rw-r--r--  src/kernels/level3/xgemm_part1.opencl         118
-rw-r--r--  src/kernels/level3/xgemm_part2.opencl          82
-rw-r--r--  src/kernels/level3/xgemm_part3.opencl          10
-rw-r--r--  test/correctness/misc/preprocessor.cpp         21
8 files changed, 204 insertions(+), 186 deletions(-)
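Background for the loop rewrites below: CLBlast ships a textual kernel pre-processor (exercised by test/correctness/misc/preprocessor.cpp) that can unroll loops itself, for OpenCL compilers whose #pragma unroll support is weak. The underscore prefix (mi becomes _mi) appears to mark the counters that this pre-processor is allowed to substitute, and the reworked loop headers (i += 1 instead of ++i, spaces around operators) keep every unrollable loop in a single canonical, easy-to-parse form. A minimal, self-contained sketch of that form; the macro value and kernel wrapper are added here purely for illustration:

    #define MWID 2  // hypothetical tuning value, for illustration only

    __kernel void unroll_sketch(__global float* out) {
      float acc[MWID];
      #pragma unroll
      for (int _mi = 0; _mi < MWID; _mi += 1) {  // canonical, unrollable form
        acc[_mi] = 0.0f;
      }
      // A textual unroller could replace the loop above with:
      //   acc[0] = 0.0f;
      //   acc[1] = 0.0f;
      out[get_global_id(0)] = acc[0] + acc[MWID - 1];
    }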
diff --git a/src/kernels/level1/xdot.opencl b/src/kernels/level1/xdot.opencl
index 02f04ea7..1a703d96 100644
--- a/src/kernels/level1/xdot.opencl
+++ b/src/kernels/level1/xdot.opencl
@@ -55,7 +55,6 @@ void Xdot(const int n,
barrier(CLK_LOCAL_MEM_FENCE);
// Performs reduction in local memory
- #pragma unroll
for (int s=WGS1/2; s>0; s=s>>1) {
if (lid < s) {
Add(lm[lid], lm[lid], lm[lid + s]);
@@ -84,7 +83,6 @@ void XdotEpilogue(const __global real* restrict input,
barrier(CLK_LOCAL_MEM_FENCE);
// Performs reduction in local memory
- #pragma unroll
for (int s=WGS2/2; s>0; s=s>>1) {
if (lid < s) {
Add(lm[lid], lm[lid], lm[lid + s]);
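Note that the two pragmas above are dropped rather than moved: the reduction counter is halved each iteration (s = s >> 1), so the loop cannot be written in the canonical _i += 1 form, and an unroll hint on it would be of no use to the textual unroller. A self-contained sketch of the pattern, assuming a power-of-two work-group size:

    __kernel void reduce_sketch(__local float* lm, __global float* out) {
      const int lid = get_local_id(0);
      // Halving stride: not expressible as "_s += 1", so left as a plain loop
      for (int s = get_local_size(0) / 2; s > 0; s = s >> 1) {
        if (lid < s) { lm[lid] += lm[lid + s]; }
        barrier(CLK_LOCAL_MEM_FENCE);
      }
      if (lid == 0) { out[get_group_id(0)] = lm[0]; }
    }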
diff --git a/src/kernels/level3/xgemm_direct_part1.opencl b/src/kernels/level3/xgemm_direct_part1.opencl
index 7d185224..e2f9c6a8 100644
--- a/src/kernels/level3/xgemm_direct_part1.opencl
+++ b/src/kernels/level3/xgemm_direct_part1.opencl
@@ -95,10 +95,10 @@ R"(
// Initializes the accumulation registers to zero
INLINE_FUNC void InitAccRegistersDirect(real cpm[NWID][MWID]) {
#pragma unroll
- for (int mi=0; mi<MWID; ++mi) {
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
#pragma unroll
- for (int ni=0; ni<NWID; ++ni) {
- SetToZero(cpm[ni][mi]);
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ SetToZero(cpm[_ni][_mi]);
}
}
}
@@ -108,10 +108,10 @@ INLINE_FUNC void InitAccRegistersDirect(real cpm[NWID][MWID]) {
// Performs the actual computation: Cpm += Apm * Bpm
INLINE_FUNC void MultiplyAccumulateDirect(real cpm[NWID][MWID], real apm[MWID], real bpm[NWID]) {
#pragma unroll
- for (int ni=0; ni<NWID; ++ni) {
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
#pragma unroll
- for (int mi=0; mi<MWID; ++mi) {
- MultiplyAdd(cpm[ni][mi], apm[mi], bpm[ni]);
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ MultiplyAdd(cpm[_ni][_mi], apm[_mi], bpm[_ni]);
}
}
}
@@ -124,10 +124,10 @@ INLINE_FUNC void GlobalToPrivateDirectA(const __global real* restrict agms, real
const int a_ld, const int a_offset, const int idm, const int idk,
const int a_transpose, const int a_conjugate) {
#pragma unroll
- for (int mi=0; mi<MWID; ++mi) {
- const int a_index = (a_transpose) ? (idm + mi)*a_ld + idk : idk*a_ld + (idm + mi);
- apm[mi] = agms[a_index + a_offset];
- if (a_conjugate) { COMPLEX_CONJUGATE(apm[mi]); }
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ const int a_index = (a_transpose) ? (idm + _mi)*a_ld + idk : idk*a_ld + (idm + _mi);
+ apm[_mi] = agms[a_index + a_offset];
+ if (a_conjugate) { COMPLEX_CONJUGATE(apm[_mi]); }
}
}
@@ -136,10 +136,10 @@ INLINE_FUNC void GlobalToPrivateDirectB(const __global real* restrict bgms, real
const int b_ld, const int b_offset, const int idn, const int idk,
const int b_transpose, const int b_conjugate) {
#pragma unroll
- for (int ni=0; ni<NWID; ++ni) {
- const int b_index = (b_transpose) ? (idn + ni)*b_ld + idk : idk*b_ld + (idn + ni);
- bpm[ni] = bgms[b_index + b_offset];
- if (b_conjugate) { COMPLEX_CONJUGATE(bpm[ni]); }
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ const int b_index = (b_transpose) ? (idn + _ni)*b_ld + idk : idk*b_ld + (idn + _ni);
+ bpm[_ni] = bgms[b_index + b_offset];
+ if (b_conjugate) { COMPLEX_CONJUGATE(bpm[_ni]); }
}
}
@@ -150,14 +150,14 @@ INLINE_FUNC void GlobalToPrivateCheckedA(const __global real* restrict agms, rea
const int a_transpose, const int a_conjugate,
const int kSizeM) {
#pragma unroll
- for (int mi=0; mi<MWID; ++mi) {
- if (idm + mi < kSizeM) {
- const int a_index = (a_transpose) ? (idm + mi)*a_ld + idk : idk*a_ld + (idm + mi);
- apm[mi] = agms[a_index + a_offset];
- if (a_conjugate) { COMPLEX_CONJUGATE(apm[mi]); }
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ if (idm + _mi < kSizeM) {
+ const int a_index = (a_transpose) ? (idm + _mi)*a_ld + idk : idk*a_ld + (idm + _mi);
+ apm[_mi] = agms[a_index + a_offset];
+ if (a_conjugate) { COMPLEX_CONJUGATE(apm[_mi]); }
}
else {
- SetToZero(apm[mi]);
+ SetToZero(apm[_mi]);
}
}
}
@@ -168,14 +168,14 @@ INLINE_FUNC void GlobalToPrivateCheckedB(const __global real* restrict bgms, rea
const int b_transpose, const int b_conjugate,
const int kSizeN) {
#pragma unroll
- for (int ni=0; ni<NWID; ++ni) {
- if (idn + ni < kSizeN) {
- const int b_index = (b_transpose) ? (idn + ni)*b_ld + idk : idk*b_ld + (idn + ni);
- bpm[ni] = bgms[b_index + b_offset];
- if (b_conjugate) { COMPLEX_CONJUGATE(bpm[ni]); }
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ if (idn + _ni < kSizeN) {
+ const int b_index = (b_transpose) ? (idn + _ni)*b_ld + idk : idk*b_ld + (idn + _ni);
+ bpm[_ni] = bgms[b_index + b_offset];
+ if (b_conjugate) { COMPLEX_CONJUGATE(bpm[_ni]); }
}
else {
- SetToZero(bpm[ni]);
+ SetToZero(bpm[_ni]);
}
}
}
@@ -187,10 +187,10 @@ INLINE_FUNC void GlobalToPrivateCheckedB(const __global real* restrict bgms, rea
INLINE_FUNC void LocalToPrivateDirectA(LOCAL_PTR real* alm, real apm[MWID], const int kg,
const int a_transpose) {
#pragma unroll
- for (int mi=0; mi<MWID; ++mi) {
- const int mg = mi + get_local_id(0)*MWID;
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ const int mg = _mi + get_local_id(0)*MWID;
const int index = (a_transpose) ? mg*(WGD + PADA) + kg : kg*(WGD + PADA) + mg;
- apm[mi] = alm[index];
+ apm[_mi] = alm[index];
}
}
@@ -198,10 +198,10 @@ INLINE_FUNC void LocalToPrivateDirectA(LOCAL_PTR real* alm, real apm[MWID], cons
INLINE_FUNC void LocalToPrivateDirectB(LOCAL_PTR real* blm, real bpm[NWID], const int kg,
const int b_transpose) {
#pragma unroll
- for (int ni=0; ni<NWID; ++ni) {
- const int ng = ni + get_local_id(1)*NWID;
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
+ const int ng = _ni + get_local_id(1)*NWID;
const int index = (b_transpose) ? ng*(WGD + PADB) + kg : kg*(WGD + PADB) + ng;
- bpm[ni] = blm[index];
+ bpm[_ni] = blm[index];
}
}
@@ -214,21 +214,21 @@ INLINE_FUNC void StoreResultsDirect(__global real* cgm, real cpm[NWID][MWID],
const real alpha, const real beta,
const int c_ld, const int c_offset, const int c_transpose) {
#pragma unroll
- for (int ni=0; ni<NWID; ++ni) {
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
#pragma unroll
- for (int mi=0; mi<MWID; ++mi) {
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
// Determines the destination index
- int c_index = (c_transpose) ? (idm + mi)*c_ld + (idn + ni) : (idn + ni)*c_ld + (idm + mi);
+ int c_index = (c_transpose) ? (idm + _mi)*c_ld + (idn + _ni) : (idn + _ni)*c_ld + (idm + _mi);
// The final multiplication with alpha (in case beta == 0)
real result;
if (IsZero(beta)) {
- Multiply(result, alpha, cpm[ni][mi]);
+ Multiply(result, alpha, cpm[_ni][_mi]);
}
// The final multiplication with alpha and the addition with beta*C
else {
- AXPBY(result, alpha, cpm[ni][mi], beta, cgm[c_index + c_offset]);
+ AXPBY(result, alpha, cpm[_ni][_mi], beta, cgm[c_index + c_offset]);
}
cgm[c_index + c_offset] = result;
}
@@ -242,22 +242,22 @@ INLINE_FUNC void StoreResultsChecked(__global real* cgm, real cpm[NWID][MWID],
const real alpha, const real beta,
const int c_ld, const int c_offset, const int c_transpose) {
#pragma unroll
- for (int ni=0; ni<NWID; ++ni) {
+ for (int _ni = 0; _ni < NWID; _ni += 1) {
#pragma unroll
- for (int mi=0; mi<MWID; ++mi) {
- if ((idm + mi) < kSizeM && (idn + ni) < kSizeN) {
+ for (int _mi = 0; _mi < MWID; _mi += 1) {
+ if ((idm + _mi) < kSizeM && (idn + _ni) < kSizeN) {
// Determines the destination index
- int c_index = (c_transpose) ? (idm + mi)*c_ld + (idn + ni) : (idn + ni)*c_ld + (idm + mi);
+ int c_index = (c_transpose) ? (idm + _mi)*c_ld + (idn + _ni) : (idn + _ni)*c_ld + (idm + _mi);
// The final multiplication with alpha (in case beta == 0)
real result;
if (IsZero(beta)) {
- Multiply(result, alpha, cpm[ni][mi]);
+ Multiply(result, alpha, cpm[_ni][_mi]);
}
// The final multiplication with alpha and the addition with beta*C
else {
- AXPBY(result, alpha, cpm[ni][mi], beta, cgm[c_index + c_offset]);
+ AXPBY(result, alpha, cpm[_ni][_mi], beta, cgm[c_index + c_offset]);
}
cgm[c_index + c_offset] = result;
}
diff --git a/src/kernels/level3/xgemm_direct_part2.opencl b/src/kernels/level3/xgemm_direct_part2.opencl
index c3bf1b80..f5bb3eb8 100644
--- a/src/kernels/level3/xgemm_direct_part2.opencl
+++ b/src/kernels/level3/xgemm_direct_part2.opencl
@@ -31,13 +31,13 @@ INLINE_FUNC void GlobalToLocalDirectA(const __global realMD* restrict agm, LOCAL
const int la1 = tid / MDIMAD;
#endif
#pragma unroll
- for (int mia=0; mia<MWAD/VWMD; ++mia) {
+ for (int _mia = 0; _mia < MWAD/VWMD; _mia += 1) {
#pragma unroll
- for (int kia=0; kia<KWAD; ++kia) {
+ for (int _kia = 0; _kia < KWAD; _kia += 1) {
// Computes the indices for the global memory
- int mg = mia + la0*(MWAD/VWMD);
- int kg = kia + la1*KWAD;
+ int mg = _mia + la0*(MWAD/VWMD);
+ int kg = _kia + la1*KWAD;
int idm = (a_transpose) ? mg + kwg/VWMD : mg + GetGroupID0()*(WGD/VWMD);
int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;
@@ -102,13 +102,13 @@ INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, LOCAL
const int lb1 = tid / NDIMBD;
#endif
#pragma unroll
- for (int kib=0; kib<KWBD; ++kib) {
+ for (int _kib = 0; _kib < KWBD; _kib += 1) {
#pragma unroll
- for (int nib=0; nib<NWBD/VWND; ++nib) {
+ for (int _nib = 0; _nib < NWBD/VWND; _nib += 1) {
// Computes the indices for the global memory
- int ng = nib + lb0*(NWBD/VWND);
- int kg = kib + lb1*KWBD;
+ int ng = _nib + lb0*(NWBD/VWND);
+ int kg = _kib + lb1*KWBD;
int idn = (b_transpose) ? ng + kwg/VWND : ng + GetGroupID1()*(WGD/VWND);
int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;
@@ -152,8 +152,9 @@ INLINE_FUNC void GlobalToLocalDirectB(const __global realND* restrict bgm, LOCAL
blm[kg*(WGD + PADB) + ng*VWND + 15] = bvec.sF;
#endif
if (b_conjugate) {
- for (int vn=0; vn<VWND; ++vn) {
- COMPLEX_CONJUGATE(blm[kg*(WGD + PADB) + ng*VWND + vn]);
+ #pragma unroll
+ for (int _vn = 0; _vn < VWND; _vn += 1) {
+ COMPLEX_CONJUGATE(blm[kg*(WGD + PADB) + ng*VWND + _vn]);
}
}
}
@@ -177,13 +178,13 @@ INLINE_FUNC void GlobalToLocalScalarA(const __global real* restrict agms, LOCAL_
const int la1 = tid / MDIMAD;
#endif
#pragma unroll
- for (int mia=0; mia<MWAD; ++mia) {
+ for (int _mia = 0; _mia < MWAD; _mia += 1) {
#pragma unroll
- for (int kia=0; kia<KWAD; ++kia) {
+ for (int _kia = 0; _kia < KWAD; _kia += 1) {
// Computes the indices for the global memory
- int mg = mia + la0*MWAD;
- int kg = kia + la1*KWAD;
+ int mg = _mia + la0*MWAD;
+ int kg = _kia + la1*KWAD;
int idm = (a_transpose) ? mg + kwg : mg + GetGroupID0()*WGD;
int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;
@@ -208,13 +209,13 @@ INLINE_FUNC void GlobalToLocalScalarB(const __global real* restrict bgms, LOCAL_
const int lb1 = tid / NDIMBD;
#endif
#pragma unroll
- for (int kib=0; kib<KWBD; ++kib) {
+ for (int _kib = 0; _kib < KWBD; _kib += 1) {
#pragma unroll
- for (int nib=0; nib<NWBD; ++nib) {
+ for (int _nib = 0; _nib < NWBD; _nib += 1) {
// Computes the indices for the global memory
- int ng = nib + lb0*NWBD;
- int kg = kib + lb1*KWBD;
+ int ng = _nib + lb0*NWBD;
+ int kg = _kib + lb1*KWBD;
int idn = (b_transpose) ? ng + kwg : ng + GetGroupID1()*WGD;
int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;
@@ -244,13 +245,13 @@ INLINE_FUNC void GlobalToLocalCheckedA(const __global real* restrict agms, LOCAL
const int la1 = tid / MDIMAD;
#endif
#pragma unroll
- for (int mia=0; mia<MWAD; ++mia) {
+ for (int _mia = 0; _mia < MWAD; _mia += 1) {
#pragma unroll
- for (int kia=0; kia<KWAD; ++kia) {
+ for (int _kia = 0; _kia < KWAD; _kia += 1) {
// Computes the indices for the global memory
- int mg = mia + la0*MWAD;
- int kg = kia + la1*KWAD;
+ int mg = _mia + la0*MWAD;
+ int kg = _kia + la1*KWAD;
int idm = (a_transpose) ? mg + kwg : mg + GetGroupID0()*WGD;
int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;
@@ -283,13 +284,13 @@ INLINE_FUNC void GlobalToLocalCheckedB(const __global real* restrict bgms, LOCAL
const int lb1 = tid / NDIMBD;
#endif
#pragma unroll
- for (int kib=0; kib<KWBD; ++kib) {
+ for (int _kib = 0; _kib < KWBD; _kib += 1) {
#pragma unroll
- for (int nib=0; nib<NWBD; ++nib) {
+ for (int _nib = 0; _nib < NWBD; _nib += 1) {
// Computes the indices for the global memory
- int ng = nib + lb0*NWBD;
- int kg = kib + lb1*KWBD;
+ int ng = _nib + lb0*NWBD;
+ int kg = _kib + lb1*KWBD;
int idn = (b_transpose) ? ng + kwg : ng + GetGroupID1()*WGD;
int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;
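All four GlobalToLocal*A/B variants above share the same cooperative-copy layout: the flat work-item id tid is split into (la0, la1) = (tid % MDIMAD, tid / MDIMAD) for A (NDIMBD for B), and each work-item then copies an MWAD x KWAD (respectively NWBD x KWBD) sub-block of the WGD x WGD tile. A hedged index recap, assuming tid is the flat local id:

    // la0 = tid % MDIMAD   -> which group of M-columns this work-item copies
    // la1 = tid / MDIMAD   -> which group of K-rows this work-item copies
    // element copied in iteration (_mia, _kia):
    //   mg = _mia + la0*MWAD,   kg = _kia + la1*KWAD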
diff --git a/src/kernels/level3/xgemm_direct_part3.opencl b/src/kernels/level3/xgemm_direct_part3.opencl
index 5862dfa3..b24695a1 100644
--- a/src/kernels/level3/xgemm_direct_part3.opencl
+++ b/src/kernels/level3/xgemm_direct_part3.opencl
@@ -50,7 +50,7 @@ INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSize
// Loops over all complete workgroup tiles (K-dimension)
int kwg = 0;
- for (; kwg < (kSizeK/WGD) * WGD; kwg+=WGD) {
+ for (; kwg < (kSizeK/WGD) * WGD; kwg += WGD) {
// Loads data: off-chip --> local (matrix A and B)
if (a_ld % VWMD == 0 && a_offset % VWMD == 0) {
@@ -68,10 +68,10 @@ INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSize
barrier(CLK_LOCAL_MEM_FENCE);
// Loops over all workitem tiles, unrolled by a factor KWID
- for (int pwi=0; pwi<WGD; pwi+=KWID) {
+ for (int pwi = 0; pwi < WGD; pwi += KWID) {
#pragma unroll
- for (int pit=0; pit<KWID; ++pit) {
- int kg = pwi + pit;
+ for (int _pit = 0; _pit < KWID; _pit += 1) {
+ int kg = pwi + _pit;
// Loads data: local --> private (matrix A and B)
LocalToPrivateDirectA(alm, apm, kg, a_transpose);
@@ -112,10 +112,10 @@ INLINE_FUNC void XgemmDirect(const int kSizeM, const int kSizeN, const int kSize
barrier(CLK_LOCAL_MEM_FENCE);
// Loops over all workitem tiles, unrolled by a factor KWID
- for (int pwi=0; pwi<WGD; pwi+=KWID) {
+ for (int pwi = 0; pwi < WGD; pwi += KWID) {
#pragma unroll
- for (int pit=0; pit<KWID; ++pit) {
- int kg = pwi + pit;
+ for (int _pit = 0; _pit < KWID; _pit += 1) {
+ int kg = pwi + _pit;
// Loads data: local --> private (matrix A and B)
LocalToPrivateDirectA(alm, apm, kg, a_transpose);
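Only the inner counter is renamed: pwi strides by KWID and remains a runtime loop, while _pit covers the KWID-wide part that may be unrolled. A hand-unrolled sketch for KWID == 2, the value the new pre-processor test defines:

    // for (int pwi = 0; pwi < WGD; pwi += 2) {
    //   /* _pit == 0 */  int kg0 = pwi + 0;  LocalToPrivateDirectA(alm, apm, kg0, a_transpose);  ...
    //   /* _pit == 1 */  int kg1 = pwi + 1;  LocalToPrivateDirectA(alm, apm, kg1, a_transpose);  ...
    // }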
diff --git a/src/kernels/level3/xgemm_part1.opencl b/src/kernels/level3/xgemm_part1.opencl
index 172b3c6b..e118ba2f 100644
--- a/src/kernels/level3/xgemm_part1.opencl
+++ b/src/kernels/level3/xgemm_part1.opencl
@@ -137,45 +137,45 @@ R"(
// Initializes the accumulation registers to zero
INLINE_FUNC void InitAccRegisters(realM cpm[NWI][MWI/VWM]) {
#pragma unroll
- for (int mi=0; mi<MWI/VWM; ++mi) {
+ for (int _mi = 0; _mi < MWI/VWM; _mi += 1) {
#pragma unroll
- for (int ni=0; ni<NWI; ++ni) {
+ for (int _ni = 0; _ni < NWI; _ni += 1) {
#if VWM == 1
- SetToZero(cpm[ni][mi]);
+ SetToZero(cpm[_ni][_mi]);
#elif VWM == 2
- SetToZero(cpm[ni][mi].x);
- SetToZero(cpm[ni][mi].y);
+ SetToZero(cpm[_ni][_mi].x);
+ SetToZero(cpm[_ni][_mi].y);
#elif VWM == 4
- SetToZero(cpm[ni][mi].x);
- SetToZero(cpm[ni][mi].y);
- SetToZero(cpm[ni][mi].z);
- SetToZero(cpm[ni][mi].w);
+ SetToZero(cpm[_ni][_mi].x);
+ SetToZero(cpm[_ni][_mi].y);
+ SetToZero(cpm[_ni][_mi].z);
+ SetToZero(cpm[_ni][_mi].w);
#elif VWM == 8
- SetToZero(cpm[ni][mi].s0);
- SetToZero(cpm[ni][mi].s1);
- SetToZero(cpm[ni][mi].s2);
- SetToZero(cpm[ni][mi].s3);
- SetToZero(cpm[ni][mi].s4);
- SetToZero(cpm[ni][mi].s5);
- SetToZero(cpm[ni][mi].s6);
- SetToZero(cpm[ni][mi].s7);
+ SetToZero(cpm[_ni][_mi].s0);
+ SetToZero(cpm[_ni][_mi].s1);
+ SetToZero(cpm[_ni][_mi].s2);
+ SetToZero(cpm[_ni][_mi].s3);
+ SetToZero(cpm[_ni][_mi].s4);
+ SetToZero(cpm[_ni][_mi].s5);
+ SetToZero(cpm[_ni][_mi].s6);
+ SetToZero(cpm[_ni][_mi].s7);
#elif VWM == 16
- SetToZero(cpm[ni][mi].s0);
- SetToZero(cpm[ni][mi].s1);
- SetToZero(cpm[ni][mi].s2);
- SetToZero(cpm[ni][mi].s3);
- SetToZero(cpm[ni][mi].s4);
- SetToZero(cpm[ni][mi].s5);
- SetToZero(cpm[ni][mi].s6);
- SetToZero(cpm[ni][mi].s7);
- SetToZero(cpm[ni][mi].s8);
- SetToZero(cpm[ni][mi].s9);
- SetToZero(cpm[ni][mi].sA);
- SetToZero(cpm[ni][mi].sB);
- SetToZero(cpm[ni][mi].sC);
- SetToZero(cpm[ni][mi].sD);
- SetToZero(cpm[ni][mi].sE);
- SetToZero(cpm[ni][mi].sF);
+ SetToZero(cpm[_ni][_mi].s0);
+ SetToZero(cpm[_ni][_mi].s1);
+ SetToZero(cpm[_ni][_mi].s2);
+ SetToZero(cpm[_ni][_mi].s3);
+ SetToZero(cpm[_ni][_mi].s4);
+ SetToZero(cpm[_ni][_mi].s5);
+ SetToZero(cpm[_ni][_mi].s6);
+ SetToZero(cpm[_ni][_mi].s7);
+ SetToZero(cpm[_ni][_mi].s8);
+ SetToZero(cpm[_ni][_mi].s9);
+ SetToZero(cpm[_ni][_mi].sA);
+ SetToZero(cpm[_ni][_mi].sB);
+ SetToZero(cpm[_ni][_mi].sC);
+ SetToZero(cpm[_ni][_mi].sD);
+ SetToZero(cpm[_ni][_mi].sE);
+ SetToZero(cpm[_ni][_mi].sF);
#endif
}
}
@@ -191,19 +191,19 @@ INLINE_FUNC void GlobalToLocalA(const __global realM* restrict agm, LOCAL_PTR re
const int la0 = tid % MDIMA;
const int la1 = tid / MDIMA;
#pragma unroll
- for (int mia=0; mia<MWA/VWM; ++mia) {
+ for (int _mia = 0; _mia < MWA/VWM; _mia += 1) {
#pragma unroll
- for (int kia=0; kia<KWA; ++kia) {
+ for (int _kia = 0; _kia < KWA; _kia += 1) {
// Computes the indices based on strided/non-strided access
#if STRM == 0
- int mg = mia + la0*(MWA/VWM);
+ int mg = _mia + la0*(MWA/VWM);
#elif STRM == 1
- int mg = la0 + mia*MDIMA;
+ int mg = la0 + _mia*MDIMA;
#endif
// Computes the indices for the global memory
- int kg = kia + la1*KWA;
+ int kg = _kia + la1*KWA;
int idm = mg + GetGroupID0() * (MWG/VWM);
int idk = kg + kwg;
@@ -221,19 +221,19 @@ INLINE_FUNC void GlobalToLocalB(const __global realN* restrict bgm, LOCAL_PTR re
const int lb0 = tid % NDIMB;
const int lb1 = tid / NDIMB;
#pragma unroll
- for (int kib=0; kib<KWB; ++kib) {
+ for (int _kib = 0; _kib < KWB; _kib += 1) {
#pragma unroll
- for (int nib=0; nib<NWB/VWN; ++nib) {
+ for (int _nib = 0; _nib < NWB/VWN; _nib += 1) {
// Computes the indices based on strided/non-strided access
#if STRN == 0
- int ng = nib + lb0*(NWB/VWN);
+ int ng = _nib + lb0*(NWB/VWN);
#elif STRN == 1
- int ng = lb0 + nib*NDIMB;
+ int ng = lb0 + _nib*NDIMB;
#endif
// Computes the indices for the global memory
- int kg = kib + lb1*KWB;
+ int kg = _kib + lb1*KWB;
int idn = ng + GetGroupID1() * (NWG/VWN);
int idk = kg + kwg;
@@ -252,20 +252,20 @@ INLINE_FUNC void GlobalToLocalB(const __global realN* restrict bgm, LOCAL_PTR re
INLINE_FUNC void GlobalToPrivateA(const __global realM* restrict agm, realM apm[MWI/VWM],
const int kSizeM, const int idk, const int kwg) {
#pragma unroll
- for (int mi=0; mi<MWI/VWM; ++mi) {
+ for (int _mi = 0; _mi < MWI/VWM; _mi += 1) {
// Computes the indices based on strided/non-strided access
#if STRM == 0
- int mg = mi + get_local_id(0)*(MWI/VWM);
+ int mg = _mi + get_local_id(0)*(MWI/VWM);
#elif STRM == 1
- int mg = get_local_id(0) + mi*MDIMC;
+ int mg = get_local_id(0) + _mi*MDIMC;
#endif
// Computes the indices for the global memory
int idm = mg + GetGroupID0() * (MWG/VWM);
// Loads the data from global memory (not transposed) and stores into registers
- apm[mi] = agm[idk*(kSizeM/VWM) + idm];
+ apm[_mi] = agm[idk*(kSizeM/VWM) + idm];
}
}
#endif
@@ -275,20 +275,20 @@ INLINE_FUNC void GlobalToPrivateA(const __global realM* restrict agm, realM apm[
INLINE_FUNC void GlobalToPrivateB(const __global realN* restrict bgm, realN bpm[NWI/VWN],
const int kSizeN, const int idk) {
#pragma unroll
- for (int ni=0; ni<NWI/VWN; ++ni) {
+ for (int _ni = 0; _ni < NWI/VWN; _ni += 1) {
// Computes the indices based on strided/non-strided access
#if STRN == 0
- int ng = ni + get_local_id(1)*(NWI/VWN);
+ int ng = _ni + get_local_id(1)*(NWI/VWN);
#elif STRN == 1
- int ng = get_local_id(1) + ni*NDIMC;
+ int ng = get_local_id(1) + _ni*NDIMC;
#endif
// Computes the indices for the global memory
int idn = ng + GetGroupID1() * (NWG/VWN);
// Loads the data from global memory (transposed) and stores into registers
- bpm[ni] = bgm[idk*(kSizeN/VWN) + idn];
+ bpm[_ni] = bgm[idk*(kSizeN/VWN) + idn];
}
}
#endif
@@ -300,13 +300,13 @@ INLINE_FUNC void GlobalToPrivateB(const __global realN* restrict bgm, realN bpm[
#if SA == 1
INLINE_FUNC void LocalToPrivateA(LOCAL_PTR realM* alm, realM apm[MWI/VWM], const int kg) {
#pragma unroll
- for (int mi=0; mi<MWI/VWM; ++mi) {
+ for (int _mi = 0; _mi < MWI/VWM; _mi += 1) {
#if STRM == 0
- int mg = mi + get_local_id(0)*(MWI/VWM);
+ int mg = _mi + get_local_id(0)*(MWI/VWM);
#elif STRM == 1
- int mg = get_local_id(0) + mi*MDIMC;
+ int mg = get_local_id(0) + _mi*MDIMC;
#endif
- apm[mi] = alm[kg*(MWG/VWM) + mg];
+ apm[_mi] = alm[kg*(MWG/VWM) + mg];
}
}
#endif
@@ -315,13 +315,13 @@ INLINE_FUNC void LocalToPrivateA(LOCAL_PTR realM* alm, realM apm[MWI/VWM], const
#if SB == 1
INLINE_FUNC void LocalToPrivateB(LOCAL_PTR realN* blm, realN bpm[NWI/VWN], const int kg) {
#pragma unroll
- for (int ni=0; ni<NWI/VWN; ++ni) {
+ for (int _ni = 0; _ni < NWI/VWN; _ni += 1) {
#if STRN == 0
- int ng = ni + get_local_id(1)*(NWI/VWN);
+ int ng = _ni + get_local_id(1)*(NWI/VWN);
#elif STRN == 1
- int ng = get_local_id(1) + ni*NDIMC;
+ int ng = get_local_id(1) + _ni*NDIMC;
#endif
- bpm[ni] = blm[kg*(NWG/VWN) + ng];
+ bpm[_ni] = blm[kg*(NWG/VWN) + ng];
}
}
#endif
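The VWM branches above exist because realM is the VWM-wide OpenCL vector of the base type and SetToZero operates on one scalar (or one complex value) at a time. For single precision with VWM == 4, the expansion is equivalent to this self-contained sketch:

    __kernel void zero_sketch(__global float4* out) {
      float4 acc;        // plays the role of one realM element at VWM == 4
      acc.x = 0.0f;
      acc.y = 0.0f;
      acc.z = 0.0f;
      acc.w = 0.0f;
      out[get_global_id(0)] = acc;
    }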
diff --git a/src/kernels/level3/xgemm_part2.opencl b/src/kernels/level3/xgemm_part2.opencl
index 06fafc8f..a5507458 100644
--- a/src/kernels/level3/xgemm_part2.opencl
+++ b/src/kernels/level3/xgemm_part2.opencl
@@ -66,46 +66,46 @@ INLINE_FUNC realM MultiplyAddVector(realM cvec, const realM avec, const real bva
// Performs the actual computation: Cpm += Apm * Bpm
INLINE_FUNC void MultiplyAccumulate(realM cpm[NWI][MWI/VWM], realM apm[MWI/VWM], realN bpm[NWI/VWN]) {
#pragma unroll
- for (int ni=0; ni<NWI/VWN; ++ni) {
+ for (int _ni = 0; _ni < NWI/VWN; _ni += 1) {
#pragma unroll
- for (int mi=0; mi<MWI/VWM; ++mi) {
- const realM aval = apm[mi];
+ for (int _mi = 0; _mi < MWI/VWM; _mi += 1) {
+ const realM aval = apm[_mi];
#if VWN == 1
- cpm[ni*VWN + 0][mi] = MultiplyAddVector(cpm[ni*VWN + 0][mi], aval, bpm[ni]);
+ cpm[_ni*VWN + 0][_mi] = MultiplyAddVector(cpm[_ni*VWN + 0][_mi], aval, bpm[_ni]);
#elif VWN == 2
- cpm[ni*VWN + 0][mi] = MultiplyAddVector(cpm[ni*VWN + 0][mi], aval, bpm[ni].x);
- cpm[ni*VWN + 1][mi] = MultiplyAddVector(cpm[ni*VWN + 1][mi], aval, bpm[ni].y);
+ cpm[_ni*VWN + 0][_mi] = MultiplyAddVector(cpm[_ni*VWN + 0][_mi], aval, bpm[_ni].x);
+ cpm[_ni*VWN + 1][_mi] = MultiplyAddVector(cpm[_ni*VWN + 1][_mi], aval, bpm[_ni].y);
#elif VWN == 4
- cpm[ni*VWN + 0][mi] = MultiplyAddVector(cpm[ni*VWN + 0][mi], aval, bpm[ni].x);
- cpm[ni*VWN + 1][mi] = MultiplyAddVector(cpm[ni*VWN + 1][mi], aval, bpm[ni].y);
- cpm[ni*VWN + 2][mi] = MultiplyAddVector(cpm[ni*VWN + 2][mi], aval, bpm[ni].z);
- cpm[ni*VWN + 3][mi] = MultiplyAddVector(cpm[ni*VWN + 3][mi], aval, bpm[ni].w);
+ cpm[_ni*VWN + 0][_mi] = MultiplyAddVector(cpm[_ni*VWN + 0][_mi], aval, bpm[_ni].x);
+ cpm[_ni*VWN + 1][_mi] = MultiplyAddVector(cpm[_ni*VWN + 1][_mi], aval, bpm[_ni].y);
+ cpm[_ni*VWN + 2][_mi] = MultiplyAddVector(cpm[_ni*VWN + 2][_mi], aval, bpm[_ni].z);
+ cpm[_ni*VWN + 3][_mi] = MultiplyAddVector(cpm[_ni*VWN + 3][_mi], aval, bpm[_ni].w);
#elif VWN == 8
- cpm[ni*VWN + 0][mi] = MultiplyAddVector(cpm[ni*VWN + 0][mi], aval, bpm[ni].s0);
- cpm[ni*VWN + 1][mi] = MultiplyAddVector(cpm[ni*VWN + 1][mi], aval, bpm[ni].s1);
- cpm[ni*VWN + 2][mi] = MultiplyAddVector(cpm[ni*VWN + 2][mi], aval, bpm[ni].s2);
- cpm[ni*VWN + 3][mi] = MultiplyAddVector(cpm[ni*VWN + 3][mi], aval, bpm[ni].s3);
- cpm[ni*VWN + 4][mi] = MultiplyAddVector(cpm[ni*VWN + 4][mi], aval, bpm[ni].s4);
- cpm[ni*VWN + 5][mi] = MultiplyAddVector(cpm[ni*VWN + 5][mi], aval, bpm[ni].s5);
- cpm[ni*VWN + 6][mi] = MultiplyAddVector(cpm[ni*VWN + 6][mi], aval, bpm[ni].s6);
- cpm[ni*VWN + 7][mi] = MultiplyAddVector(cpm[ni*VWN + 7][mi], aval, bpm[ni].s7);
+ cpm[_ni*VWN + 0][_mi] = MultiplyAddVector(cpm[_ni*VWN + 0][_mi], aval, bpm[_ni].s0);
+ cpm[_ni*VWN + 1][_mi] = MultiplyAddVector(cpm[_ni*VWN + 1][_mi], aval, bpm[_ni].s1);
+ cpm[_ni*VWN + 2][_mi] = MultiplyAddVector(cpm[_ni*VWN + 2][_mi], aval, bpm[_ni].s2);
+ cpm[_ni*VWN + 3][_mi] = MultiplyAddVector(cpm[_ni*VWN + 3][_mi], aval, bpm[_ni].s3);
+ cpm[_ni*VWN + 4][_mi] = MultiplyAddVector(cpm[_ni*VWN + 4][_mi], aval, bpm[_ni].s4);
+ cpm[_ni*VWN + 5][_mi] = MultiplyAddVector(cpm[_ni*VWN + 5][_mi], aval, bpm[_ni].s5);
+ cpm[_ni*VWN + 6][_mi] = MultiplyAddVector(cpm[_ni*VWN + 6][_mi], aval, bpm[_ni].s6);
+ cpm[_ni*VWN + 7][_mi] = MultiplyAddVector(cpm[_ni*VWN + 7][_mi], aval, bpm[_ni].s7);
#elif VWN == 16
- cpm[ni*VWN + 0 ][mi] = MultiplyAddVector(cpm[ni*VWN + 0 ][mi], aval, bpm[ni].s0);
- cpm[ni*VWN + 1 ][mi] = MultiplyAddVector(cpm[ni*VWN + 1 ][mi], aval, bpm[ni].s1);
- cpm[ni*VWN + 2 ][mi] = MultiplyAddVector(cpm[ni*VWN + 2 ][mi], aval, bpm[ni].s2);
- cpm[ni*VWN + 3 ][mi] = MultiplyAddVector(cpm[ni*VWN + 3 ][mi], aval, bpm[ni].s3);
- cpm[ni*VWN + 4 ][mi] = MultiplyAddVector(cpm[ni*VWN + 4 ][mi], aval, bpm[ni].s4);
- cpm[ni*VWN + 5 ][mi] = MultiplyAddVector(cpm[ni*VWN + 5 ][mi], aval, bpm[ni].s5);
- cpm[ni*VWN + 6 ][mi] = MultiplyAddVector(cpm[ni*VWN + 6 ][mi], aval, bpm[ni].s6);
- cpm[ni*VWN + 7 ][mi] = MultiplyAddVector(cpm[ni*VWN + 7 ][mi], aval, bpm[ni].s7);
- cpm[ni*VWN + 8 ][mi] = MultiplyAddVector(cpm[ni*VWN + 8 ][mi], aval, bpm[ni].s8);
- cpm[ni*VWN + 9 ][mi] = MultiplyAddVector(cpm[ni*VWN + 9 ][mi], aval, bpm[ni].s9);
- cpm[ni*VWN + 10][mi] = MultiplyAddVector(cpm[ni*VWN + 10][mi], aval, bpm[ni].sA);
- cpm[ni*VWN + 11][mi] = MultiplyAddVector(cpm[ni*VWN + 11][mi], aval, bpm[ni].sB);
- cpm[ni*VWN + 12][mi] = MultiplyAddVector(cpm[ni*VWN + 12][mi], aval, bpm[ni].sC);
- cpm[ni*VWN + 13][mi] = MultiplyAddVector(cpm[ni*VWN + 13][mi], aval, bpm[ni].sD);
- cpm[ni*VWN + 14][mi] = MultiplyAddVector(cpm[ni*VWN + 14][mi], aval, bpm[ni].sE);
- cpm[ni*VWN + 15][mi] = MultiplyAddVector(cpm[ni*VWN + 15][mi], aval, bpm[ni].sF);
+ cpm[_ni*VWN + 0 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 0 ][_mi], aval, bpm[_ni].s0);
+ cpm[_ni*VWN + 1 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 1 ][_mi], aval, bpm[_ni].s1);
+ cpm[_ni*VWN + 2 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 2 ][_mi], aval, bpm[_ni].s2);
+ cpm[_ni*VWN + 3 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 3 ][_mi], aval, bpm[_ni].s3);
+ cpm[_ni*VWN + 4 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 4 ][_mi], aval, bpm[_ni].s4);
+ cpm[_ni*VWN + 5 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 5 ][_mi], aval, bpm[_ni].s5);
+ cpm[_ni*VWN + 6 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 6 ][_mi], aval, bpm[_ni].s6);
+ cpm[_ni*VWN + 7 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 7 ][_mi], aval, bpm[_ni].s7);
+ cpm[_ni*VWN + 8 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 8 ][_mi], aval, bpm[_ni].s8);
+ cpm[_ni*VWN + 9 ][_mi] = MultiplyAddVector(cpm[_ni*VWN + 9 ][_mi], aval, bpm[_ni].s9);
+ cpm[_ni*VWN + 10][_mi] = MultiplyAddVector(cpm[_ni*VWN + 10][_mi], aval, bpm[_ni].sA);
+ cpm[_ni*VWN + 11][_mi] = MultiplyAddVector(cpm[_ni*VWN + 11][_mi], aval, bpm[_ni].sB);
+ cpm[_ni*VWN + 12][_mi] = MultiplyAddVector(cpm[_ni*VWN + 12][_mi], aval, bpm[_ni].sC);
+ cpm[_ni*VWN + 13][_mi] = MultiplyAddVector(cpm[_ni*VWN + 13][_mi], aval, bpm[_ni].sD);
+ cpm[_ni*VWN + 14][_mi] = MultiplyAddVector(cpm[_ni*VWN + 14][_mi], aval, bpm[_ni].sE);
+ cpm[_ni*VWN + 15][_mi] = MultiplyAddVector(cpm[_ni*VWN + 15][_mi], aval, bpm[_ni].sF);
#endif
}
}
@@ -118,25 +118,25 @@ INLINE_FUNC void MultiplyAccumulate(realM cpm[NWI][MWI/VWM], realM apm[MWI/VWM],
INLINE_FUNC void StoreResults(__global realM* cgm, realM cpm[NWI][MWI/VWM], const int kSizeM,
const real alpha, const real beta) {
#pragma unroll
- for (int ni=0; ni<NWI; ++ni) {
+ for (int _ni = 0; _ni < NWI; _ni += 1) {
#pragma unroll
- for (int mi=0; mi<MWI/VWM; ++mi) {
+ for (int _mi = 0; _mi < MWI/VWM; _mi += 1) {
#if STRM == 0
- int mg = mi + get_local_id(0)*(MWI/VWM);
+ int mg = _mi + get_local_id(0)*(MWI/VWM);
#elif STRM == 1
- int mg = get_local_id(0) + mi*MDIMC;
+ int mg = get_local_id(0) + _mi*MDIMC;
#endif
#if STRN == 0
- int ng = ni + get_local_id(1)*NWI;
+ int ng = _ni + get_local_id(1)*NWI;
#elif STRN == 1
- int ng = ni%VWN + get_local_id(1)*VWN + (ni/VWN)*VWN*NDIMC;
+ int ng = _ni%VWN + get_local_id(1)*VWN + (_ni/VWN)*VWN*NDIMC;
#endif
int idm = mg + GetGroupID0() * (MWG/VWM);
int idn = ng + GetGroupID1() * NWG;
int index = idn*(kSizeM/VWM) + idm;
realM result;
- realM xval = cpm[ni][mi];
+ realM xval = cpm[_ni][_mi];
// The final multiplication with alpha (in case beta == 0)
if (IsZero(beta)) {
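Indexing note for MultiplyAccumulate: cpm holds NWI scalar-N rows of realM vectors, so one VWN-wide bpm[_ni] fans out across rows _ni*VWN + 0 through _ni*VWN + (VWN - 1). With VWN == 2, for example, the two updates shown above amount to:

    // cpm[_ni*2 + 0][_mi] += apm[_mi] * bpm[_ni].x;   (via MultiplyAddVector)
    // cpm[_ni*2 + 1][_mi] += apm[_mi] * bpm[_ni].y;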
diff --git a/src/kernels/level3/xgemm_part3.opencl b/src/kernels/level3/xgemm_part3.opencl
index ce24907c..4e85c4a8 100644
--- a/src/kernels/level3/xgemm_part3.opencl
+++ b/src/kernels/level3/xgemm_part3.opencl
@@ -43,7 +43,7 @@ INLINE_FUNC void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK,
InitAccRegisters(cpm);
// Loops over all workgroup tiles
- for (int kwg=0; kwg<kSizeK; kwg+=KWG) {
+ for (int kwg = 0; kwg < kSizeK; kwg += KWG) {
// Loads data: off-chip --> local (matrix A)
#if SA == 1
@@ -58,14 +58,14 @@ INLINE_FUNC void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK,
#endif
// Loops over all workitem tiles, unrolled by a factor KWI
- for (int pwi=0; pwi<KWG; pwi+=KWI) {
+ for (int pwi = 0; pwi < KWG; pwi += KWI) {
#pragma unroll
- for (int pit=0; pit<KWI; ++pit) {
+ for (int _pit = 0; _pit < KWI; _pit += 1) {
#if SA == 0 || SB == 0
- int idk = kwg + pwi + pit;
+ int idk = kwg + pwi + _pit;
#endif
#if SA == 1 || SB == 1
- int kg = pwi+pit;
+ int kg = pwi + _pit;
#endif
// Loads data: local --> private (matrix A)
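The two index variables serve the two memory paths: idk = kwg + pwi + _pit is the global k index, needed when A or B streams directly from off-chip memory (SA == 0 or SB == 0), while kg = pwi + _pit addresses within the current KWG-wide local-memory tile (SA == 1 or SB == 1).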
diff --git a/test/correctness/misc/preprocessor.cpp b/test/correctness/misc/preprocessor.cpp
index d352ce37..71b59c04 100644
--- a/test/correctness/misc/preprocessor.cpp
+++ b/test/correctness/misc/preprocessor.cpp
@@ -172,7 +172,26 @@ size_t RunPreprocessor(int argc, char *argv[], const bool silent, const Precisio
;
if (TestKernel(device, context, "TransposePadMatrix", transpose_pad_sources, precision)) { passed++; } else { errors++; }
-
+ // GEMM (indirect)
+ const auto gemm_sources =
+ "#define KWI 2\n"
+ "#define MWG 16\n"
+ "#define NWG 16\n"
+ #include "../src/kernels/level3/xgemm_part1.opencl"
+ #include "../src/kernels/level3/xgemm_part2.opencl"
+ #include "../src/kernels/level3/xgemm_part3.opencl"
+ ;
+ if (TestKernel(device, context, "Xgemm", gemm_sources, precision)) { passed++; } else { errors++; }
+
+ // GEMM (direct)
+ const auto gemm_direct_sources =
+ "#define KWID 2\n"
+ "#define WGD 16\n"
+ #include "../src/kernels/level3/xgemm_direct_part1.opencl"
+ #include "../src/kernels/level3/xgemm_direct_part2.opencl"
+ #include "../src/kernels/level3/xgemm_direct_part3.opencl"
+ ;
+ if (TestKernel(device, context, "XgemmDirectTN", gemm_direct_sources, precision)) { passed++; } else { errors++; }
// Prints and returns the statistics
std::cout << std::endl;
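A note on the test mechanics above: each .opencl kernel file opens with R"( and closes with )" (visible at the top of the xgemm_part1 hunk), so #include-ing it inside a C++ expression splices the kernel text in as a raw string literal, and the #define lines in front seed the tuning parameters before any kernel code is parsed. Roughly, with the concatenation spelled out as an assumption about the surrounding test code:

    // const auto gemm_sources =
    //     "#define KWI 2\n" "#define MWG 16\n" "#define NWG 16\n"
    //     R"( ...contents of xgemm_part1/2/3.opencl... )"   // via #include
    //     ;   // adjacent literals concatenate into one kernel source string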