Diffstat (limited to 'src/kernels/level3/xgemm_direct_part2.opencl')
-rw-r--r-- | src/kernels/level3/xgemm_direct_part2.opencl | 432 |
1 file changed, 219 insertions(+), 213 deletions(-)
diff --git a/src/kernels/level3/xgemm_direct_part2.opencl b/src/kernels/level3/xgemm_direct_part2.opencl
index 0d066186..953a3b3c 100644
--- a/src/kernels/level3/xgemm_direct_part2.opencl
+++ b/src/kernels/level3/xgemm_direct_part2.opencl
@@ -7,7 +7,7 @@
 // Author(s):
 //   Cedric Nugteren <www.cedricnugteren.nl>
 //
-// This is part 2 of 2 of the GEMM kernel. See part 1 for more information.
+// This is part 2 of 3 of the GEMM kernel. See part 1 for more information.
 //
 // =================================================================================================
@@ -17,179 +17,222 @@
 R"(
 
 // =================================================================================================
 
-// Merges the results in Cpm with the global array in Cgm. This also performs the multiplication
-// with the constants: Cgm = alpha*A*B + beta*Cgm = alpha*Cpm + beta*Cgm
-inline void StoreResultsDirect(__global real* cgm, real cpm[NWID][MWID],
-                               const int kSizeM, const int kSizeN,
-                               const real alpha, const real beta,
-                               const int c_ld, const int c_offset, const int c_transpose) {
+// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
+// caching the A input matrix.
+inline void GlobalToLocalDirectA(const __global realMD* restrict agm, __local real* alm,
+                                 const int a_ld, const int a_offset, const int kwg,
+                                 const int a_transpose, const int a_conjugate) {
+  #if MDIMCD == MDIMAD
+    const int la0 = get_local_id(0);
+    const int la1 = get_local_id(1);
+  #else
+    const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
+    const int la0 = tid % MDIMAD;
+    const int la1 = tid / MDIMAD;
+  #endif
   #pragma unroll
-  for (int ni=0; ni<NWID; ++ni) {
+  for (int mia=0; mia<MWAD/VWMD; ++mia) {
     #pragma unroll
-    for (int mi=0; mi<MWID; ++mi) {
-      int mg = mi + get_local_id(0)*MWID;
-      int ng = ni + get_local_id(1)*NWID;
-      int idm = mg + GetGroupID0() * WGD;
-      int idn = ng + GetGroupID1() * WGD;
-
-      // Determines the destination index
-      const int c_index = (c_transpose) ? idm*c_ld + idn : idn*c_ld + idm;
-
-      // The final multiplication with alpha (in case beta == 0)
-      real result;
-      if (IsZero(beta)) {
-        Multiply(result, alpha, cpm[ni][mi]);
-      }
-      // The final multiplication with alpha and the addition with beta*C
-      else {
-        AXPBY(result, alpha, cpm[ni][mi], beta, cgm[c_index + c_offset]);
+    for (int kia=0; kia<KWAD; ++kia) {
+
+      // Computes the indices for the global memory
+      int mg = mia + la0*(MWAD/VWMD);
+      int kg = kia + la1*KWAD;
+      int idm = (a_transpose) ? mg + kwg/VWMD : mg + GetGroupID0()*(WGD/VWMD);
+      int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;
+
+      // Loads the data from global memory into the local memory
+      const realMD avec = agm[idk*(a_ld/VWMD) + idm + a_offset];
+      #if VWMD == 1
+        alm[kg*(WGD + PADA) + mg] = avec;
+      #elif VWMD == 2
+        alm[kg*(WGD + PADA) + mg*VWMD + 0] = avec.x;
+        alm[kg*(WGD + PADA) + mg*VWMD + 1] = avec.y;
+      #elif VWMD == 4
+        alm[kg*(WGD + PADA) + mg*VWMD + 0] = avec.x;
+        alm[kg*(WGD + PADA) + mg*VWMD + 1] = avec.y;
+        alm[kg*(WGD + PADA) + mg*VWMD + 2] = avec.z;
+        alm[kg*(WGD + PADA) + mg*VWMD + 3] = avec.w;
+      #elif VWMD == 8
+        alm[kg*(WGD + PADA) + mg*VWMD + 0] = avec.s0;
+        alm[kg*(WGD + PADA) + mg*VWMD + 1] = avec.s1;
+        alm[kg*(WGD + PADA) + mg*VWMD + 2] = avec.s2;
+        alm[kg*(WGD + PADA) + mg*VWMD + 3] = avec.s3;
+        alm[kg*(WGD + PADA) + mg*VWMD + 4] = avec.s4;
+        alm[kg*(WGD + PADA) + mg*VWMD + 5] = avec.s5;
+        alm[kg*(WGD + PADA) + mg*VWMD + 6] = avec.s6;
+        alm[kg*(WGD + PADA) + mg*VWMD + 7] = avec.s7;
+      #elif VWMD == 16
+        alm[kg*(WGD + PADA) + mg*VWMD + 0] = avec.s0;
+        alm[kg*(WGD + PADA) + mg*VWMD + 1] = avec.s1;
+        alm[kg*(WGD + PADA) + mg*VWMD + 2] = avec.s2;
+        alm[kg*(WGD + PADA) + mg*VWMD + 3] = avec.s3;
+        alm[kg*(WGD + PADA) + mg*VWMD + 4] = avec.s4;
+        alm[kg*(WGD + PADA) + mg*VWMD + 5] = avec.s5;
+        alm[kg*(WGD + PADA) + mg*VWMD + 6] = avec.s6;
+        alm[kg*(WGD + PADA) + mg*VWMD + 7] = avec.s7;
+        alm[kg*(WGD + PADA) + mg*VWMD + 8] = avec.s8;
+        alm[kg*(WGD + PADA) + mg*VWMD + 9] = avec.s9;
+        alm[kg*(WGD + PADA) + mg*VWMD + 10] = avec.sA;
+        alm[kg*(WGD + PADA) + mg*VWMD + 11] = avec.sB;
+        alm[kg*(WGD + PADA) + mg*VWMD + 12] = avec.sC;
+        alm[kg*(WGD + PADA) + mg*VWMD + 13] = avec.sD;
+        alm[kg*(WGD + PADA) + mg*VWMD + 14] = avec.sE;
+        alm[kg*(WGD + PADA) + mg*VWMD + 15] = avec.sF;
+      #endif
+      if (a_conjugate) {
+        for (int vm=0; vm<VWMD; ++vm) {
+          COMPLEX_CONJUGATE(alm[kg*(WGD + PADA) + mg*VWMD + vm]);
+        }
       }
-      cgm[c_index + c_offset] = result;
     }
   }
 }
 
-// =================================================================================================
-
-// Main body of the kernel. This is the direct version without restrictions.
-inline void XgemmDirect(const int kSizeM, const int kSizeN, const int kSizeK,
-                        const real_arg arg_alpha,
-                        const real_arg arg_beta,
-                        const __global realMD* restrict agm, const int a_offset, const int a_ld,
-                        const __global realND* restrict bgm, const int b_offset, const int b_ld,
-                        __global real* cgm, const int c_offset, const int c_ld,
-                        __local real* alm, __local real* blm,
-                        const int a_transpose, const int b_transpose, const int c_transpose,
-                        const int a_conjugate, const int b_conjugate) {
-  const real alpha = GetRealArg(arg_alpha);
-  const real beta = GetRealArg(arg_beta);
-
-  // Extra pointers to scalar versions of global memory
-  const __global real* restrict agms = (const __global real* restrict) agm;
-  const __global real* restrict bgms = (const __global real* restrict) bgm;
-
-  // Allocates workitem-private memory (registers)
-  real apm[MWID];
-  real bpm[NWID];
-  real cpm[NWID][MWID];
-
-  // Initializes the accumulation registers
-  InitAccRegistersDirect(cpm);
-
-  // The faster version of GEMM is not allowed on the (incomplete) borders. Therefore, this section
-  // processes only the main parts: output blocks of WGD by WGD.
-  const int idm = get_local_id(0) * MWID + GetGroupID0() * WGD;
-  const int idn = get_local_id(1) * NWID + GetGroupID1() * WGD;
-  if ((idm < (kSizeM/WGD)*WGD) && (idn < (kSizeN/WGD)*WGD) &&
-      (a_ld % VWMD == 0) && (b_ld % VWND == 0)) {
-
-    // Loops over all complete workgroup tiles
-    int kwg = 0;
-    for (; kwg < (kSizeK/WGD) * WGD; kwg+=WGD) {
-
-      // Loads data: off-chip --> local (matrix A and B)
-      GlobalToLocalDirectA(agm, alm, a_ld, a_offset, kwg, a_transpose, a_conjugate);
-      GlobalToLocalDirectB(bgm, blm, b_ld, b_offset, kwg, b_transpose, b_conjugate);
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      // Loops over all workitem tiles, unrolled by a factor KWID
-      for (int pwi=0; pwi<WGD; pwi+=KWID) {
-        #pragma unroll
-        for (int pit=0; pit<KWID; ++pit) {
-          int kg = pwi + pit;
-
-          // Loads data: local --> private (matrix A)
-          LocalToPrivateDirectA(alm, apm, kg, a_transpose);
-
-          // Loads data: local --> private (matrix B)
-          LocalToPrivateDirectB(blm, bpm, kg, b_transpose);
-
-          // Performs the accumulation (Cpm += Apm * Bpm)
-          MultiplyAccumulateDirect(cpm, apm, bpm);
+// Same as above, but now for the B input matrix
+inline void GlobalToLocalDirectB(const __global realND* restrict bgm, __local real* blm,
+                                 const int b_ld, const int b_offset, const int kwg,
+                                 const int b_transpose, const int b_conjugate) {
+  #if MDIMCD == NDIMBD
+    const int lb0 = get_local_id(0);
+    const int lb1 = get_local_id(1);
+  #else
+    const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
+    const int lb0 = tid % NDIMBD;
+    const int lb1 = tid / NDIMBD;
+  #endif
+  #pragma unroll
+  for (int kib=0; kib<KWBD; ++kib) {
+    #pragma unroll
+    for (int nib=0; nib<NWBD/VWND; ++nib) {
+
+      // Computes the indices for the global memory
+      int ng = nib + lb0*(NWBD/VWND);
+      int kg = kib + lb1*KWBD;
+      int idn = (b_transpose) ? ng + kwg/VWND : ng + GetGroupID1()*(WGD/VWND);
+      int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;
+
+      // Loads the data from global memory into the local memory
+      const realND bvec = bgm[idk*(b_ld/VWND) + idn + b_offset];
+      #if VWND == 1
+        blm[kg*(WGD + PADB) + ng] = bvec;
+      #elif VWND == 2
+        blm[kg*(WGD + PADB) + ng*VWND + 0] = bvec.x;
+        blm[kg*(WGD + PADB) + ng*VWND + 1] = bvec.y;
+      #elif VWND == 4
+        blm[kg*(WGD + PADB) + ng*VWND + 0] = bvec.x;
+        blm[kg*(WGD + PADB) + ng*VWND + 1] = bvec.y;
+        blm[kg*(WGD + PADB) + ng*VWND + 2] = bvec.z;
+        blm[kg*(WGD + PADB) + ng*VWND + 3] = bvec.w;
+      #elif VWND == 8
+        blm[kg*(WGD + PADB) + ng*VWND + 0] = bvec.s0;
+        blm[kg*(WGD + PADB) + ng*VWND + 1] = bvec.s1;
+        blm[kg*(WGD + PADB) + ng*VWND + 2] = bvec.s2;
+        blm[kg*(WGD + PADB) + ng*VWND + 3] = bvec.s3;
+        blm[kg*(WGD + PADB) + ng*VWND + 4] = bvec.s4;
+        blm[kg*(WGD + PADB) + ng*VWND + 5] = bvec.s5;
+        blm[kg*(WGD + PADB) + ng*VWND + 6] = bvec.s6;
+        blm[kg*(WGD + PADB) + ng*VWND + 7] = bvec.s7;
+      #elif VWND == 16
+        blm[kg*(WGD + PADB) + ng*VWND + 0] = bvec.s0;
+        blm[kg*(WGD + PADB) + ng*VWND + 1] = bvec.s1;
+        blm[kg*(WGD + PADB) + ng*VWND + 2] = bvec.s2;
+        blm[kg*(WGD + PADB) + ng*VWND + 3] = bvec.s3;
+        blm[kg*(WGD + PADB) + ng*VWND + 4] = bvec.s4;
+        blm[kg*(WGD + PADB) + ng*VWND + 5] = bvec.s5;
+        blm[kg*(WGD + PADB) + ng*VWND + 6] = bvec.s6;
+        blm[kg*(WGD + PADB) + ng*VWND + 7] = bvec.s7;
+        blm[kg*(WGD + PADB) + ng*VWND + 8] = bvec.s8;
+        blm[kg*(WGD + PADB) + ng*VWND + 9] = bvec.s9;
+        blm[kg*(WGD + PADB) + ng*VWND + 10] = bvec.sA;
+        blm[kg*(WGD + PADB) + ng*VWND + 11] = bvec.sB;
+        blm[kg*(WGD + PADB) + ng*VWND + 12] = bvec.sC;
+        blm[kg*(WGD + PADB) + ng*VWND + 13] = bvec.sD;
+        blm[kg*(WGD + PADB) + ng*VWND + 14] = bvec.sE;
+        blm[kg*(WGD + PADB) + ng*VWND + 15] = bvec.sF;
+      #endif
+      if (b_conjugate) {
+        for (int vn=0; vn<VWND; ++vn) {
+          COMPLEX_CONJUGATE(blm[kg*(WGD + PADB) + ng*VWND + vn]);
         }
       }
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-
-    // Loop over the remaining part (incomplete tile in K-dimension)
-    for (; kwg < kSizeK; ++kwg) {
-      const int idk = kwg;
-
-      // Loads A into register memory
-      #pragma unroll
-      for (int mi=0; mi<MWID; ++mi) {
-        const int a_index = (a_transpose) ? (idm + mi)*a_ld + idk : idk*a_ld + (idm + mi);
-        apm[mi] = agms[a_index + a_offset];
-        if (a_conjugate) { COMPLEX_CONJUGATE(apm[mi]); }
-      }
-
-      // Loads B into register memory
-      #pragma unroll
-      for (int ni=0; ni<NWID; ++ni) {
-        const int b_index = (b_transpose) ? (idn + ni)*b_ld + idk : idk*b_ld + (idn + ni);
-        bpm[ni] = bgms[b_index + b_offset];
-        if (b_conjugate) { COMPLEX_CONJUGATE(bpm[ni]); }
-      }
-
-      // Performs the accumulation (Cpm += Apm * Bpm)
-      MultiplyAccumulateDirect(cpm, apm, bpm);
     }
-
-    // Stores a tile of results and performs the multiplication with alpha and beta
-    StoreResultsDirect(cgm, cpm, kSizeM, kSizeN, alpha, beta, c_ld, c_offset, c_transpose);
   }
+}
 
-  // Simple but slow version for the parts on the edge (incomplete tiles in M and N-dimensions)
-  else {
-
-    // Loop over the K-dimension
-    for (int idk = 0; idk < kSizeK; ++idk) {
-
-      // Loads A into register memory
-      #pragma unroll
-      for (int mi=0; mi<MWID; ++mi) {
-        if (idm + mi < kSizeM) {
-          const int a_index = (a_transpose) ? (idm + mi)*a_ld + idk : idk*a_ld + (idm + mi);
-          apm[mi] = agms[a_index + a_offset];
-          if (a_conjugate) { COMPLEX_CONJUGATE(apm[mi]); }
-        }
-        else {
-          SetToZero(apm[mi]);
-        }
+// Caches global off-chip memory into local (shared) memory on-chip. This function is specific for
+// caching the A input matrix. In contrast to the functions above, this function performs bounds
+// checks and doesn't use the vector data-types.
+inline void GlobalToLocalCheckedA(const __global real* restrict agms, __local real* alm,
+                                  const int a_ld, const int a_offset, const int kwg,
+                                  const int a_transpose, const int a_conjugate,
+                                  const int kSizeM, const int kSizeK) {
+  #if MDIMCD == MDIMAD
+    const int la0 = get_local_id(0);
+    const int la1 = get_local_id(1);
+  #else
+    const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
+    const int la0 = tid % MDIMAD;
+    const int la1 = tid / MDIMAD;
+  #endif
+  #pragma unroll
+  for (int mia=0; mia<MWAD; ++mia) {
+    #pragma unroll
+    for (int kia=0; kia<KWAD; ++kia) {
+
+      // Computes the indices for the global memory
+      int mg = mia + la0*MWAD;
+      int kg = kia + la1*KWAD;
+      int idm = (a_transpose) ? mg + kwg : mg + GetGroupID0()*WGD;
+      int idk = (a_transpose) ? kg + GetGroupID0()*WGD : kg + kwg;
+
+      // Loads the data from global memory into the local memory
+      int condition = (a_transpose) ? idm < kSizeK : idm < kSizeM;
+      if (condition) {
+        const real result = agms[idk*a_ld + idm + a_offset];
+        if (a_conjugate) { COMPLEX_CONJUGATE(result); }
+        alm[kg*(WGD + PADA) + mg] = result;
       }
-
-      // Loads B into register memory
-      #pragma unroll
-      for (int ni=0; ni<NWID; ++ni) {
-        if (idn + ni < kSizeN) {
-          const int b_index = (b_transpose) ? (idn + ni)*b_ld + idk : idk*b_ld + (idn + ni);
-          bpm[ni] = bgms[b_index + b_offset];
-          if (b_conjugate) { COMPLEX_CONJUGATE(bpm[ni]); }
-        }
-        else {
-          SetToZero(bpm[ni]);
-        }
+      else {
+        SetToZero(alm[kg*(WGD + PADA) + mg]);
       }
-
-      // Performs the accumulation (Cpm += Apm * Bpm)
-      MultiplyAccumulateDirect(cpm, apm, bpm);
     }
+  }
+}
 
-    // Stores the results
+// Same as above, but now for the B input matrix
+inline void GlobalToLocalCheckedB(const __global real* restrict bgms, __local real* blm,
+                                  const int b_ld, const int b_offset, const int kwg,
+                                  const int b_transpose, const int b_conjugate,
+                                  const int kSizeN, const int kSizeK) {
+  #if MDIMCD == NDIMBD
+    const int lb0 = get_local_id(0);
+    const int lb1 = get_local_id(1);
+  #else
+    const int tid = get_local_id(0) + MDIMCD*get_local_id(1);
+    const int lb0 = tid % NDIMBD;
+    const int lb1 = tid / NDIMBD;
+  #endif
+  #pragma unroll
+  for (int kib=0; kib<KWBD; ++kib) {
     #pragma unroll
-    for (int ni=0; ni<NWID; ++ni) {
-      #pragma unroll
-      for (int mi=0; mi<MWID; ++mi) {
-        if ((idm + mi) < kSizeM && (idn + ni) < kSizeN) {
-
-          // Determines the destination index
-          const int c_index = (c_transpose) ? (idm + mi)*c_ld + (idn + ni) : (idn + ni)*c_ld + (idm + mi);
-
-          // Computes and stores the result
-          real result;
-          AXPBY(result, alpha, cpm[ni][mi], beta, cgm[c_index + c_offset]);
-          cgm[c_index + c_offset] = result;
-        }
+    for (int nib=0; nib<NWBD; ++nib) {
+
+      // Computes the indices for the global memory
+      int ng = nib + lb0*NWBD;
+      int kg = kib + lb1*KWBD;
+      int idn = (b_transpose) ? ng + kwg : ng + GetGroupID1()*WGD;
+      int idk = (b_transpose) ? kg + GetGroupID1()*WGD : kg + kwg;
+
+      // Loads the data from global memory into the local memory
+      int condition = (b_transpose) ? idn < kSizeK : idn < kSizeN;
+      if (condition) {
+        const real result = bgms[idk*b_ld + idn + b_offset];
+        if (b_conjugate) { COMPLEX_CONJUGATE(result); }
+        blm[kg*(WGD + PADB) + ng] = result;
+      }
+      else {
+        SetToZero(blm[kg*(WGD + PADB) + ng]);
       }
     }
   }
 }
@@ -197,64 +240,27 @@ inline void XgemmDirect(const int kSizeM, const int kSizeN, const int kSizeK,
 
 // =================================================================================================
 
-// Direct version of the GEMM kernel with [A, B] = [non-transposed, non-transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectNN(const int kSizeM, const int kSizeN, const int kSizeK,
-                            const real_arg arg_alpha, const real_arg arg_beta,
-                            const __global realMD* restrict agm, const int a_offset, const int a_ld,
-                            const __global realND* restrict bgm, const int b_offset, const int b_ld,
-                            __global real* cgm, const int c_offset, const int c_ld,
-                            const int c_transpose, const int a_conjugate, const int b_conjugate) {
-  __local real alm[WGD * (WGD + PADA)];
-  __local real blm[WGD * (WGD + PADB)];
-  XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
-              agm, a_offset, a_ld, bgm, b_offset, b_ld, cgm, c_offset, c_ld,
-              alm, blm, 0, 0, c_transpose, a_conjugate, b_conjugate);
-}
-
-// Direct version of the GEMM kernel with [A, B] = [non-transposed, transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectNT(const int kSizeM, const int kSizeN, const int kSizeK,
-                            const real_arg arg_alpha, const real_arg arg_beta,
-                            const __global realMD* restrict agm, const int a_offset, const int a_ld,
-                            const __global realND* restrict bgm, const int b_offset, const int b_ld,
-                            __global real* cgm, const int c_offset, const int c_ld,
-                            const int c_transpose, const int a_conjugate, const int b_conjugate) {
-  __local real alm[WGD * (WGD + PADA)];
-  __local real blm[WGD * (WGD + PADB)];
-  XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
-              agm, a_offset, a_ld, bgm, b_offset, b_ld, cgm, c_offset, c_ld,
-              alm, blm, 0, 1, c_transpose, a_conjugate, b_conjugate);
-}
-
-// Direct version of the GEMM kernel with [A, B] = [transposed, non-transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectTN(const int kSizeM, const int kSizeN, const int kSizeK,
-                            const real_arg arg_alpha, const real_arg arg_beta,
-                            const __global realMD* restrict agm, const int a_offset, const int a_ld,
-                            const __global realND* restrict bgm, const int b_offset, const int b_ld,
-                            __global real* cgm, const int c_offset, const int c_ld,
-                            const int c_transpose, const int a_conjugate, const int b_conjugate) {
-  __local real alm[WGD * (WGD + PADA)];
-  __local real blm[WGD * (WGD + PADB)];
-  XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
-              agm, a_offset, a_ld, bgm, b_offset, b_ld, cgm, c_offset, c_ld,
-              alm, blm, 1, 0, c_transpose, a_conjugate, b_conjugate);
+// Caches on-chip local memory into per-thread private memory (registers). This function is specific
+// for caching the A input matrix.
+inline void LocalToPrivateDirectA(__local real* alm, real apm[MWID], const int kg,
+                                  const int a_transpose) {
+  #pragma unroll
+  for (int mi=0; mi<MWID; ++mi) {
+    const int mg = mi + get_local_id(0)*MWID;
+    const int index = (a_transpose) ? mg*(WGD + PADA) + kg : kg*(WGD + PADA) + mg;
+    apm[mi] = alm[index];
+  }
 }
 
-// Direct version of the GEMM kernel with [A, B] = [transposed, transposed]
-__attribute__((reqd_work_group_size(MDIMCD, NDIMCD, 1)))
-__kernel void XgemmDirectTT(const int kSizeM, const int kSizeN, const int kSizeK,
-                            const real_arg arg_alpha, const real_arg arg_beta,
-                            const __global realMD* restrict agm, const int a_offset, const int a_ld,
-                            const __global realND* restrict bgm, const int b_offset, const int b_ld,
-                            __global real* cgm, const int c_offset, const int c_ld,
-                            const int c_transpose, const int a_conjugate, const int b_conjugate) {
-  __local real alm[WGD * (WGD + PADA)];
-  __local real blm[WGD * (WGD + PADB)];
-  XgemmDirect(kSizeM, kSizeN, kSizeK, arg_alpha, arg_beta,
-              agm, a_offset, a_ld, bgm, b_offset, b_ld, cgm, c_offset, c_ld,
-              alm, blm, 1, 1, c_transpose, a_conjugate, b_conjugate);
+// Same as above, but now for the B input matrix
+inline void LocalToPrivateDirectB(__local real* blm, real bpm[NWID], const int kg,
+                                  const int b_transpose) {
+  #pragma unroll
+  for (int ni=0; ni<NWID; ++ni) {
+    const int ng = ni + get_local_id(1)*NWID;
+    const int index = (b_transpose) ? ng*(WGD + PADB) + kg : kg*(WGD + PADB) + ng;
+    bpm[ni] = blm[index];
+  }
 }
 
 // =================================================================================================