author    Cedric Nugteren <web@cedricnugteren.nl>  2016-10-03 20:09:15 +0200
committer Cedric Nugteren <web@cedricnugteren.nl>  2016-10-03 20:09:15 +0200
commit    ca0c075de2a73f250046876b0ca5f90dc4ef0e77 (patch)
tree      54b6677c5796a95b531f9663d24d4e6f07b17146
parent    c1c4bc5d209280e4ec9be5c0a26f7c94077a6b20 (diff)
Added functions to load from off-chip to local memory without vector loads for the GEMM direct kernels
-rw-r--r--  src/kernels/level3/xgemm_direct_part1.opencl  25
-rw-r--r--  src/kernels/level3/xgemm_direct_part2.opencl  93
-rw-r--r--  src/kernels/level3/xgemm_direct_part3.opencl  18
3 files changed, 106 insertions, 30 deletions
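
The change below keeps the existing vectorized loaders (GlobalToLocalDirectA/B), which require the leading dimensions to be multiples of the vector widths VWMD/VWND, and adds scalar counterparts (GlobalToLocalScalarA/B) so that matrices with unaligned leading dimensions can still take the local-memory path. A minimal host-side sketch of the dispatch criterion, with an assumed vector width and example leading dimensions (none of these values come from the commit):

/* Illustrates the alignment check used to choose between the vectorized and
   the scalar loader: vector loads read VWMD consecutive elements per access,
   so rows stay vector-aligned only if the leading dimension is a multiple of
   the vector width. */
#include <stdio.h>

int main(void) {
  const int vwmd = 4;                          /* assumed vector width */
  const int leading_dims[] = {1024, 1027};     /* example leading dimensions */
  for (int i = 0; i < 2; ++i) {
    const int a_ld = leading_dims[i];
    const char *path = (a_ld % vwmd == 0) ? "vectorized GlobalToLocalDirectA"
                                          : "scalar GlobalToLocalScalarA";
    printf("a_ld = %4d -> %s\n", a_ld, path);
  }
  return 0;
}
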
diff --git a/src/kernels/level3/xgemm_direct_part1.opencl b/src/kernels/level3/xgemm_direct_part1.opencl
index 2e5addef..a8bd450e 100644
--- a/src/kernels/level3/xgemm_direct_part1.opencl
+++ b/src/kernels/level3/xgemm_direct_part1.opencl
@@ -182,6 +182,31 @@ inline void GlobalToPrivateCheckedB(const __global real* restrict bgms, real bpm
// =================================================================================================
+// Caches on-chip local memory into per-thread private memory (registers). This function is specific
+// for caching the A input matrix.
+inline void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg,
+ const int a_transpose) {
+ #pragma unroll
+ for (int mi=0; mi<MWID; ++mi) {
+ const int mg = mi + get_local_id(0)*MWID;
+ const int index = (a_transpose) ? mg*(WGD + PADA) + kg : kg*(WGD + PADA) + mg;
+ apm[mi] = alm[index];
+ }
+}
+
+// Same as above, but now for the B input matrix
+inline void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int kg,
+ const int b_transpose) {
+ #pragma unroll
+ for (int ni=0; ni<NWID; ++ni) {
+ const int ng = ni + get_local_id(1)*NWID;
+ const int index = (b_transpose) ? ng*(WGD + PADB) + kg : kg*(WGD + PADB) + ng;
+ bpm[ni] = blm[index];
+ }
+}
+
+// =================================================================================================
+
// Merges the results in Cpm with the global array in Cgm. This also performs the multiplication
// with the constants: Cgm = alpha*A*B + beta*Cgm = alpha*Cpm + beta*Cgm
inline void StoreResultsDirect(__global real* cgm, real cpm[NWID][MWID],
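
The LocalToPrivateDirectA/B helpers above read from the padded local tile (pitch WGD + PADA, respectively WGD + PADB): in the non-transposed case a work-item fetches MWID consecutive elements from row kg, in the transposed case one element per row with a stride of the pitch. A minimal CPU sketch of that indexing for a single work-item, with assumed values for WGD, PADA, MWID and the work-item id (example values only, not taken from the kernel configuration):

/* Emulates the LocalToPrivateDirectA index computation on the host. */
#include <stdio.h>

#define WGD 8     /* assumed tile size */
#define PADA 1    /* assumed local-memory padding */
#define MWID 2    /* assumed work per work-item in the M dimension */

int main(void) {
  float alm[WGD * (WGD + PADA)];           /* stand-in for the __local tile */
  for (int i = 0; i < WGD * (WGD + PADA); ++i) { alm[i] = (float)i; }

  const int local_id0 = 1;                 /* stand-in for get_local_id(0) */
  const int kg = 3;                        /* current k-index within the tile */
  float apm[MWID];                         /* the per-thread "registers" */

  for (int a_transpose = 0; a_transpose <= 1; ++a_transpose) {
    for (int mi = 0; mi < MWID; ++mi) {
      const int mg = mi + local_id0 * MWID;
      const int index = a_transpose ? mg * (WGD + PADA) + kg
                                    : kg * (WGD + PADA) + mg;
      apm[mi] = alm[index];
      printf("transpose=%d mi=%d -> alm[%2d] = %g\n", a_transpose, mi, index, apm[mi]);
    }
  }
  return 0;
}
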
diff --git a/src/kernels/level3/xgemm_direct_part2.opencl b/src/kernels/level3/xgemm_direct_part2.opencl
index 953a3b3c..5f5c6883 100644
--- a/src/kernels/level3/xgemm_direct_part2.opencl
+++ b/src/kernels/level3/xgemm_direct_part2.opencl
@@ -160,6 +160,74 @@ inline void GlobalToLocalDirectB(const __global realND* restrict bgm, __local re
}
}
+// =================================================================================================
+
+// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
+// caching the A input matrix. In contrast to the functions above, this function doesn't
+// use the vector data-types.
+inline void GlobalToLocalScalarA(const __global real* restrict agms, __local real* alm,
+ const int a_ld, const int a_offset, const int kwg,
+ const int a_transpose, const int a_conjugate) {
+ #if MDIMCD == MDIMAD
+ const int la0 = get_local_id(0);
+ const int la1 = get_local_id(1);
+ #else
+ const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
+ const int la0 = tid % MDIMAD;
+ const int la1 = tid / MDIMAD;
+ #endif
+ #pragma unroll
+ for (int mia=0; mia<MWAD; ++mia) {
+ #pragma unroll
+ for (int kia=0; kia<KWAD; ++kia) {
+
+ // Computes the indices for the global memory
+ int mg = mia + la0*MWAD;
+ int kg = kia + la1*KWAD;
+ int idm = (a_transpose) ? mg + kwg : mg + GetGroupID0()*WGD;
+ int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;
+
+ // Loads the data from global memory into the local memory
+ real result = agms[idk*a_ld + idm + a_offset];
+ if (a_conjugate) { COMPLEX_CONJUGATE(result); }
+ alm[kg*(WGD + PADA) + mg] = result;
+ }
+ }
+}
+
+// Same as above, but now for the B input matrix
+inline void GlobalToLocalScalarB(const __global real* restrict bgms, __local real* blm,
+ const int b_ld, const int b_offset, const int kwg,
+ const int b_transpose, const int b_conjugate) {
+ #if MDIMCD == NDIMBD
+ const int lb0 = get_local_id(0);
+ const int lb1 = get_local_id(1);
+ #else
+ const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
+ const int lb0 = tid % NDIMBD;
+ const int lb1 = tid / NDIMBD;
+ #endif
+ #pragma unroll
+ for (int kib=0; kib<KWBD; ++kib) {
+ #pragma unroll
+ for (int nib=0; nib<NWBD; ++nib) {
+
+ // Computes the indices for the global memory
+ int ng = nib + lb0*NWBD;
+ int kg = kib + lb1*KWBD;
+ int idn = (b_transpose) ? ng + kwg : ng + GetGroupID1()*WGD;
+ int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;
+
+ // Loads the data from global memory into the local memory
+ real result = bgms[idk*b_ld + idn + b_offset];
+ if (b_conjugate) { COMPLEX_CONJUGATE(result); }
+ blm[kg*(WGD + PADB) + ng] = result;
+ }
+ }
+}
+
+// =================================================================================================
+
// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
// caching the A input matrix. In contrast to the functions above, this function performs bounds
// checks and doesn't use the vector data-types.
@@ -240,31 +308,6 @@ inline void GlobalToLocalCheckedB(const __global real* restrict bgms, __local re
// =================================================================================================
-// Caches on-chip local memory into per-thread private memory (registers). This function is specific
-// for caching the A input matrix.
-inline void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg,
- const int a_transpose) {
- #pragma unroll
- for (int mi=0; mi<MWID; ++mi) {
- const int mg = mi + get_local_id(0)*MWID;
- const int index = (a_transpose) ? mg*(WGD + PADA) + kg : kg*(WGD + PADA) + mg;
- apm[mi] = alm[index];
- }
-}
-
-// Same as above, but now for the B input matrix
-inline void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int kg,
- const int b_transpose) {
- #pragma unroll
- for (int ni=0; ni<NWID; ++ni) {
- const int ng = ni + get_local_id(1)*NWID;
- const int index = (b_transpose) ? ng*(WGD + PADB) + kg : kg*(WGD + PADB) + ng;
- bpm[ni] = blm[index];
- }
-}
-
-// =================================================================================================
-
// End of the C++11 raw string literal
)"
diff --git a/src/kernels/level3/xgemm_direct_part3.opencl b/src/kernels/level3/xgemm_direct_part3.opencl
index 14ed8223..a9350e00 100644
--- a/src/kernels/level3/xgemm_direct_part3.opencl
+++ b/src/kernels/level3/xgemm_direct_part3.opencl
@@ -46,17 +46,25 @@ inline void XgemmDirect(const int kSizeM, const int kSizeN, const int kSizeK,
// processes only the main parts: output blocks of WGD by WGD.
const int idm = get_local_id(0) * MWID + GetGroupID0() * WGD;
const int idn = get_local_id(1) * NWID + GetGroupID1() * WGD;
-
- if ((idm < (kSizeM/WGD)*WGD) && (idn < (kSizeN/WGD)*WGD) &&
- (a_ld % VWMD == 0) && (b_ld % VWND == 0)) {
+ if ((idm < (kSizeM/WGD)*WGD) && (idn < (kSizeN/WGD)*WGD)) {
// Loops over all complete workgroup tiles (K-dimension)
int kwg = 0;
for (; kwg < (kSizeK/WGD) * WGD; kwg+=WGD) {
// Loads data: off-chip --> local (matrix A and B)
- GlobalToLocalDirectA(agm, alm, a_ld, a_offset, kwg, a_transpose, a_conjugate);
- GlobalToLocalDirectB(bgm, blm, b_ld, b_offset, kwg, b_transpose, b_conjugate);
+ if (a_ld % VWMD == 0) {
+ GlobalToLocalDirectA(agm, alm, a_ld, a_offset, kwg, a_transpose, a_conjugate);
+ }
+ else {
+ GlobalToLocalScalarA(agms, alm, a_ld, a_offset, kwg, a_transpose, a_conjugate);
+ }
+ if (b_ld % VWND == 0) {
+ GlobalToLocalDirectB(bgm, blm, b_ld, b_offset, kwg, b_transpose, b_conjugate);
+ }
+ else {
+ GlobalToLocalScalarB(bgms, blm, b_ld, b_offset, kwg, b_transpose, b_conjugate);
+ }
barrier(CLK_LOCAL_MEM_FENCE);
// Loops over all workitem tiles, unrolled by a factor KWID