// =================================================================================================
// This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This
// project loosely follows the Google C++ styleguide and uses a tab-size of two spaces and a max-
// width of 100 characters per line.
//
// Author(s):
//   Cedric Nugteren
//
// This is part 2 of 2 of the GEMM kernel. See part 1 for more information.
//
// =================================================================================================

// Enables loading of this file using the C++ pre-processor's #include (C++11 standard raw string
// literal). Comment-out this line for syntax-highlighting when developing.
R"(

// =================================================================================================

// The vectorised multiply-add function: computes cvec += avec * bval element-wise for the realM
// vector width VWM. Uses the hardware mad through 'cvec += avec * bval' when USE_VECTOR_MAD is
// set; otherwise falls back to the per-component MultiplyAdd macro from part 1 (required for
// complex data-types, which have no vector operator*).
inline realM MultiplyAddVector(realM cvec, const realM avec, const real bval) {
  #if USE_VECTOR_MAD == 1
    cvec += avec * bval;
  #else
    #if VWM == 1
      MultiplyAdd(cvec,    avec,    bval);
    #elif VWM == 2
      MultiplyAdd(cvec.x , avec.x,  bval);
      MultiplyAdd(cvec.y , avec.y,  bval);
    #elif VWM == 4
      MultiplyAdd(cvec.x , avec.x,  bval);
      MultiplyAdd(cvec.y , avec.y,  bval);
      MultiplyAdd(cvec.z , avec.z,  bval);
      MultiplyAdd(cvec.w , avec.w,  bval);
    #elif VWM == 8
      MultiplyAdd(cvec.s0, avec.s0, bval);
      MultiplyAdd(cvec.s1, avec.s1, bval);
      MultiplyAdd(cvec.s2, avec.s2, bval);
      MultiplyAdd(cvec.s3, avec.s3, bval);
      MultiplyAdd(cvec.s4, avec.s4, bval);
      MultiplyAdd(cvec.s5, avec.s5, bval);
      MultiplyAdd(cvec.s6, avec.s6, bval);
      MultiplyAdd(cvec.s7, avec.s7, bval);
    #elif VWM == 16
      MultiplyAdd(cvec.s0, avec.s0, bval);
      MultiplyAdd(cvec.s1, avec.s1, bval);
      MultiplyAdd(cvec.s2, avec.s2, bval);
      MultiplyAdd(cvec.s3, avec.s3, bval);
      MultiplyAdd(cvec.s4, avec.s4, bval);
      MultiplyAdd(cvec.s5, avec.s5, bval);
      MultiplyAdd(cvec.s6, avec.s6, bval);
      MultiplyAdd(cvec.s7, avec.s7, bval);
      MultiplyAdd(cvec.s8, avec.s8, bval);
      MultiplyAdd(cvec.s9, avec.s9, bval);
      MultiplyAdd(cvec.sA, avec.sA, bval);
      MultiplyAdd(cvec.sB, avec.sB, bval);
      MultiplyAdd(cvec.sC, avec.sC, bval);
      MultiplyAdd(cvec.sD, avec.sD, bval);
      MultiplyAdd(cvec.sE, avec.sE, bval);
      MultiplyAdd(cvec.sF, avec.sF, bval);
    #endif
  #endif
  return cvec;
}

// =================================================================================================

// Performs the actual computation: Cpm += Apm * Bpm. Iterates over the NWI x MWI work-item tile
// in vector-width steps, expanding the realN vector 'bpm' into its scalar components.
// NOTE(review): the loop bounds and the per-VWN expansion below were reconstructed; the original
// text of this function was destroyed by tag-stripping. Confirm against the project repository.
inline void MultiplyAccumulate(realM cpm[NWI][MWI/VWM], realM apm[MWI/VWM], realN bpm[NWI/VWN]) {
  #pragma unroll
  for (int ni=0; ni<NWI/VWN; ++ni) {
    #pragma unroll
    for (int mi=0; mi<MWI/VWM; ++mi) {
      #if VWN == 1
        cpm[ni*VWN + 0 ][mi] = MultiplyAddVector(cpm[ni*VWN + 0 ][mi], apm[mi], bpm[ni]);
      #elif VWN == 2
        cpm[ni*VWN + 0 ][mi] = MultiplyAddVector(cpm[ni*VWN + 0 ][mi], apm[mi], bpm[ni].x);
        cpm[ni*VWN + 1 ][mi] = MultiplyAddVector(cpm[ni*VWN + 1 ][mi], apm[mi], bpm[ni].y);
      #elif VWN == 4
        cpm[ni*VWN + 0 ][mi] = MultiplyAddVector(cpm[ni*VWN + 0 ][mi], apm[mi], bpm[ni].x);
        cpm[ni*VWN + 1 ][mi] = MultiplyAddVector(cpm[ni*VWN + 1 ][mi], apm[mi], bpm[ni].y);
        cpm[ni*VWN + 2 ][mi] = MultiplyAddVector(cpm[ni*VWN + 2 ][mi], apm[mi], bpm[ni].z);
        cpm[ni*VWN + 3 ][mi] = MultiplyAddVector(cpm[ni*VWN + 3 ][mi], apm[mi], bpm[ni].w);
      #elif VWN == 8
        cpm[ni*VWN + 0 ][mi] = MultiplyAddVector(cpm[ni*VWN + 0 ][mi], apm[mi], bpm[ni].s0);
        cpm[ni*VWN + 1 ][mi] = MultiplyAddVector(cpm[ni*VWN + 1 ][mi], apm[mi], bpm[ni].s1);
        cpm[ni*VWN + 2 ][mi] = MultiplyAddVector(cpm[ni*VWN + 2 ][mi], apm[mi], bpm[ni].s2);
        cpm[ni*VWN + 3 ][mi] = MultiplyAddVector(cpm[ni*VWN + 3 ][mi], apm[mi], bpm[ni].s3);
        cpm[ni*VWN + 4 ][mi] = MultiplyAddVector(cpm[ni*VWN + 4 ][mi], apm[mi], bpm[ni].s4);
        cpm[ni*VWN + 5 ][mi] = MultiplyAddVector(cpm[ni*VWN + 5 ][mi], apm[mi], bpm[ni].s5);
        cpm[ni*VWN + 6 ][mi] = MultiplyAddVector(cpm[ni*VWN + 6 ][mi], apm[mi], bpm[ni].s6);
        cpm[ni*VWN + 7 ][mi] = MultiplyAddVector(cpm[ni*VWN + 7 ][mi], apm[mi], bpm[ni].s7);
      #elif VWN == 16
        cpm[ni*VWN + 0 ][mi] = MultiplyAddVector(cpm[ni*VWN + 0 ][mi], apm[mi], bpm[ni].s0);
        cpm[ni*VWN + 1 ][mi] = MultiplyAddVector(cpm[ni*VWN + 1 ][mi], apm[mi], bpm[ni].s1);
        cpm[ni*VWN + 2 ][mi] = MultiplyAddVector(cpm[ni*VWN + 2 ][mi], apm[mi], bpm[ni].s2);
        cpm[ni*VWN + 3 ][mi] = MultiplyAddVector(cpm[ni*VWN + 3 ][mi], apm[mi], bpm[ni].s3);
        cpm[ni*VWN + 4 ][mi] = MultiplyAddVector(cpm[ni*VWN + 4 ][mi], apm[mi], bpm[ni].s4);
        cpm[ni*VWN + 5 ][mi] = MultiplyAddVector(cpm[ni*VWN + 5 ][mi], apm[mi], bpm[ni].s5);
        cpm[ni*VWN + 6 ][mi] = MultiplyAddVector(cpm[ni*VWN + 6 ][mi], apm[mi], bpm[ni].s6);
        cpm[ni*VWN + 7 ][mi] = MultiplyAddVector(cpm[ni*VWN + 7 ][mi], apm[mi], bpm[ni].s7);
        cpm[ni*VWN + 8 ][mi] = MultiplyAddVector(cpm[ni*VWN + 8 ][mi], apm[mi], bpm[ni].s8);
        cpm[ni*VWN + 9 ][mi] = MultiplyAddVector(cpm[ni*VWN + 9 ][mi], apm[mi], bpm[ni].s9);
        cpm[ni*VWN + 10][mi] = MultiplyAddVector(cpm[ni*VWN + 10][mi], apm[mi], bpm[ni].sA);
        cpm[ni*VWN + 11][mi] = MultiplyAddVector(cpm[ni*VWN + 11][mi], apm[mi], bpm[ni].sB);
        cpm[ni*VWN + 12][mi] = MultiplyAddVector(cpm[ni*VWN + 12][mi], apm[mi], bpm[ni].sC);
        cpm[ni*VWN + 13][mi] = MultiplyAddVector(cpm[ni*VWN + 13][mi], apm[mi], bpm[ni].sD);
        cpm[ni*VWN + 14][mi] = MultiplyAddVector(cpm[ni*VWN + 14][mi], apm[mi], bpm[ni].sE);
        cpm[ni*VWN + 15][mi] = MultiplyAddVector(cpm[ni*VWN + 15][mi], apm[mi], bpm[ni].sF);
      #endif
    }
  }
}

// =================================================================================================

// The body of the GEMM kernel, shared between the regular kernel and the triangular variants
// below. Accumulates an MWG x NWG tile of C into the register array 'cpm'. The optional local
// memory pointers 'alm'/'blm' are passed in by the caller, matching the SA/SB configuration.
// NOTE(review): this signature and the statements before the first GlobalToLocalA call were
// reconstructed from the visible call sites; confirm against the project repository.
inline void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK,
                      const __global realM* restrict agm, const __global realN* restrict bgm,
                      __global realM* cgm, realM cpm[NWI][MWI/VWM]
                      #if SA == 1 && SB == 1
                        , __local realM* alm, __local realN* blm
                      #elif SA == 1
                        , __local realM* alm
                      #elif SB == 1
                        , __local realN* blm
                      #endif
                      ) {

  // Allocates workitem-private memory (registers)
  realM apm[MWI/VWM];
  realN bpm[NWI/VWN];

  // Combined thread identifier, used for the cooperative off-chip --> local loads
  #if SA == 1 || SB == 1
    volatile int tid = get_local_id(0) + MDIMC*get_local_id(1);
  #endif

  // Initializes the accumulation registers
  InitAccRegisters(cpm);

  // Loops over all workgroup tiles along the K dimension
  for (int kwg=0; kwg<kSizeK; kwg+=KWG) {

    // Loads data: off-chip --> local (matrix A)
    #if SA == 1
      GlobalToLocalA(agm, alm, kSizeM, tid, kwg);
    #endif
    // Loads data: off-chip --> local (matrix B)
    #if SB == 1
      GlobalToLocalB(bgm, blm, kSizeN, tid, kwg);
    #endif
    #if SA == 1 || SB == 1
      barrier(CLK_LOCAL_MEM_FENCE);
    #endif

    // Loops over all workitem tiles, unrolled by a factor KWI
    for (int pwi=0; pwi<KWG; pwi+=KWI) {
      #pragma unroll
      for (int pit=0; pit<KWI; ++pit) {
        #if SA == 0 || SB == 0
          int idk = kwg + pwi + pit;    // global K-index (direct off-chip loads)
        #endif
        #if SA == 1 || SB == 1
          int kg = pwi + pit;           // K-index within the local tile
        #endif

        // Loads data: local --> private (matrix A)
        #if SA == 1
          LocalToPrivateA(alm, apm, kg);
        // Loads data: off-chip --> private (matrix A)
        #else
          GlobalToPrivateA(agm, apm, kSizeM, idk, kwg);
        #endif

        // Loads data: local --> private (matrix B)
        #if SB == 1
          LocalToPrivateB(blm, bpm, kg);
        // Loads data: off-chip --> private (matrix B)
        #else
          GlobalToPrivateB(bgm, bpm, kSizeN, idk);
        #endif

        // Performs the accumulation (Cpm += Apm * Bpm)
        MultiplyAccumulate(cpm, apm, bpm);
      }
    }
    #if SA == 1 || SB == 1
      barrier(CLK_LOCAL_MEM_FENCE);
    #endif
  }
  #if GLOBAL_MEM_FENCE == 1
    barrier(CLK_GLOBAL_MEM_FENCE);
  #endif
}

// =================================================================================================

// The upper-triangular and lower-triangular kernels are only used in special cases
#if defined(ROUTINE_SYRK) || defined(ROUTINE_HERK) || defined(ROUTINE_SYR2K) || defined(ROUTINE_HER2K)

// Main entry point of the kernel. This is the upper-triangular version.
__attribute__((reqd_work_group_size(MDIMC, NDIMC, 1)))
__kernel void XgemmUpper(const int kSizeN, const int kSizeK,
                         const __constant real* restrict arg_alpha,
                         const __constant real* restrict arg_beta,
                         const __global realM* restrict agm, const __global realN* restrict bgm,
                         __global realM* cgm) {

  // Skip these threads if they do not contain threads contributing to the upper-triangle
  if (GetGroupID1()*NWG < GetGroupID0()*MWG) {
    return;
  }

  // Fetches the scalar factors from constant memory
  const real alpha = arg_alpha[0];
  const real beta = arg_beta[0];

  // Allocates workgroup-private memory (local memory), only for the enabled matrices
  #if SA == 1
    __local realM alm[KWG * MWG/VWM];
  #endif
  #if SB == 1
    __local realN blm[KWG * NWG/VWN];
  #endif

  // Register storage for this workitem's share of the result tile
  realM cpm[NWI][MWI/VWM];

  // Computes the matrix-multiplication; the matrix is square, hence kSizeN serves as both M and N
  #if SA == 1 && SB == 1
    XgemmBody(kSizeN, kSizeN, kSizeK, agm, bgm, cgm, cpm, alm, blm);
  #elif SA == 1
    XgemmBody(kSizeN, kSizeN, kSizeK, agm, bgm, cgm, cpm, alm);
  #elif SB == 1
    XgemmBody(kSizeN, kSizeN, kSizeK, agm, bgm, cgm, cpm, blm);
  #else
    XgemmBody(kSizeN, kSizeN, kSizeK, agm, bgm, cgm, cpm);
  #endif

  // Stores an MWG * NWG tile of results and performs the multiplication with alpha and beta
  StoreResults(cgm, cpm, kSizeN, alpha, beta);
}
__attribute__((reqd_work_group_size(MDIMC, NDIMC, 1)))
__kernel void XgemmLower(const int kSizeN, const int kSizeK,
                         const __constant real* restrict arg_alpha,
                         const __constant real* restrict arg_beta,
                         const __global realM* restrict agm, const __global realN* restrict bgm,
                         __global realM* cgm) {

  // Skip these threads if they do not contain threads contributing to the lower-triangle
  if (GetGroupID1()*NWG > GetGroupID0()*MWG) {
    return;
  }

  // Fetches the scalar factors from constant memory
  const real alpha = arg_alpha[0];
  const real beta = arg_beta[0];

  // Allocates workgroup-private memory (local memory), only for the enabled matrices
  #if SA == 1
    __local realM alm[KWG * MWG/VWM];
  #endif
  #if SB == 1
    __local realN blm[KWG * NWG/VWN];
  #endif

  // Register storage for this workitem's share of the result tile
  realM cpm[NWI][MWI/VWM];

  // Computes the matrix-multiplication; the matrix is square, hence kSizeN serves as both M and N
  #if SA == 1 && SB == 1
    XgemmBody(kSizeN, kSizeN, kSizeK, agm, bgm, cgm, cpm, alm, blm);
  #elif SA == 1
    XgemmBody(kSizeN, kSizeN, kSizeK, agm, bgm, cgm, cpm, alm);
  #elif SB == 1
    XgemmBody(kSizeN, kSizeN, kSizeK, agm, bgm, cgm, cpm, blm);
  #else
    XgemmBody(kSizeN, kSizeN, kSizeK, agm, bgm, cgm, cpm);
  #endif

  // Stores an MWG * NWG tile of results and performs the multiplication with alpha and beta
  StoreResults(cgm, cpm, kSizeN, alpha, beta);
}

// =================================================================================================

// If not using a triangular version, include the regular kernel
#else

// Main entry point of the kernel. This is the regular full version.
__attribute__((reqd_work_group_size(MDIMC, NDIMC, 1)))
__kernel void Xgemm(const int kSizeM, const int kSizeN, const int kSizeK,
                    const __constant real* restrict arg_alpha,
                    const __constant real* restrict arg_beta,
                    const __global realM* restrict agm, const __global realN* restrict bgm,
                    __global realM* cgm) {

  // Fetches the scalar factors from constant memory
  const real alpha = arg_alpha[0];
  const real beta = arg_beta[0];

  // Allocates workgroup-private memory (local memory), only for the enabled matrices
  #if SA == 1
    __local realM alm[KWG * MWG/VWM];
  #endif
  #if SB == 1
    __local realN blm[KWG * NWG/VWN];
  #endif

  // Register storage for this workitem's share of the result tile
  realM cpm[NWI][MWI/VWM];

  // Computes the matrix-multiplication and stores the result in register memory
  #if SA == 1 && SB == 1
    XgemmBody(kSizeM, kSizeN, kSizeK, agm, bgm, cgm, cpm, alm, blm);
  #elif SA == 1
    XgemmBody(kSizeM, kSizeN, kSizeK, agm, bgm, cgm, cpm, alm);
  #elif SB == 1
    XgemmBody(kSizeM, kSizeN, kSizeK, agm, bgm, cgm, cpm, blm);
  #else
    XgemmBody(kSizeM, kSizeN, kSizeK, agm, bgm, cgm, cpm);
  #endif

  // Stores an MWG * NWG tile of results and performs the multiplication with alpha and beta
  StoreResults(cgm, cpm, kSizeM, alpha, beta);
}

#endif

// =================================================================================================

// End of the C++11 raw string literal
)"

// =================================================================================================