-rw-r--r--  src/clpp11.hpp                         |  7
-rw-r--r--  src/cupp11.hpp                         |  5
-rw-r--r--  src/kernels/level3/xgemm_part1.opencl  | 20
-rw-r--r--  src/kernels/level3/xgemm_part3.opencl  | 42
-rw-r--r--  src/utilities/compile.cpp              | 14
5 files changed, 84 insertions(+), 4 deletions(-)
diff --git a/src/clpp11.hpp b/src/clpp11.hpp
index 8d6a1127..8ac0523f 100644
--- a/src/clpp11.hpp
+++ b/src/clpp11.hpp
@@ -44,6 +44,7 @@
#include <numeric> // std::accumulate
#include <cstring> // std::strlen
#include <cstdio> // fprintf, stderr
+#include <cassert> // assert
// OpenCL
#define CL_USE_DEPRECATED_OPENCL_1_1_APIS // to disable deprecation warnings
@@ -355,6 +356,12 @@ class Device {
std::string{"."} + std::to_string(GetInfo<cl_uint>(CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV));
}
+ // Returns whether the Nvidia chip has a Volta or later architecture (sm_70 or higher)
+ bool IsPostNVIDIAVolta() const {
+ assert(HasExtension("cl_nv_device_attribute_query"));
+ return GetInfo<cl_uint>(CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV) >= 7;
+ }
+
// Retrieves the above extra information (if present)
std::string GetExtraInfo() const {
if (HasExtension("cl_amd_device_attribute_query")) { return AMDBoardName(); }
diff --git a/src/cupp11.hpp b/src/cupp11.hpp
index a1cb1614..ce765844 100644
--- a/src/cupp11.hpp
+++ b/src/cupp11.hpp
@@ -327,6 +327,11 @@ public:
std::string AMDBoardName() const { return ""; }
std::string NVIDIAComputeCapability() const { return Capabilities(); }
+ // Returns whether the Nvidia chip has a Volta or later architecture (major version 7 or higher)
+ bool IsPostNVIDIAVolta() const {
+ return GetInfo(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR) >= 7;
+ }
+
// Retrieves the above extra information
std::string GetExtraInfo() const { return NVIDIAComputeCapability(); }
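
Note (not part of the patch): both IsPostNVIDIAVolta() helpers reduce to a compute-capability query. A minimal raw-API sketch for the OpenCL back-end, using a hypothetical helper name and assuming the cl_nv_device_attribute_query extension has already been confirmed (as the assert in clpp11.hpp does):

    #include <CL/cl_ext.h>  // defines CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV

    bool is_post_volta(cl_device_id device) {
      cl_uint major = 0;  // compute-capability major version, e.g. 7 for Volta (sm_70)
      clGetDeviceInfo(device, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV,
                      sizeof(major), &major, nullptr);
      return major >= 7;
    }

The CUDA back-end obtains the same value through CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, i.e. the driver API's cuDeviceGetAttribute query.
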
diff --git a/src/kernels/level3/xgemm_part1.opencl b/src/kernels/level3/xgemm_part1.opencl
index 99d64c91..3cfc5dfb 100644
--- a/src/kernels/level3/xgemm_part1.opencl
+++ b/src/kernels/level3/xgemm_part1.opencl
@@ -114,13 +114,29 @@ R"(
#define GLOBAL_MEM_FENCE 0 // Global synchronisation barrier for potential better performance
#endif
-// Intel subgroups (https://www.khronos.org/registry/OpenCL/extensions/intel/cl_intel_subgroups.txt)
+#ifndef SUBGROUP_SHUFFLING_NVIDIA_PRE_VOLTA
+ #define SUBGROUP_SHUFFLING_NVIDIA_PRE_VOLTA 0
+#endif
+#ifndef SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA
+ #define SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA 0
+#endif
+#ifndef SUBGROUP_SHUFFLING_INTEL
+ #define SUBGROUP_SHUFFLING_INTEL 0
+#endif
#ifndef USE_SUBGROUP_SHUFFLING
#define USE_SUBGROUP_SHUFFLING 0 // Optionally enables subgroup shuffling for Intel GPUs
#endif
-#if USE_SUBGROUP_SHUFFLING == 1
+
+// Intel subgroups (https://www.khronos.org/registry/OpenCL/extensions/intel/cl_intel_subgroups.txt)
+#if USE_SUBGROUP_SHUFFLING == 1 && SUBGROUP_SHUFFLING_INTEL
#define SUBGROUP_SIZE 8 // Assumes subgroup size is always 8 on Intel GPUs
#endif
+
+// NVIDIA warps as subgroups using inline PTX (https://docs.nvidia.com/cuda/inline-ptx-assembly/index.html)
+#if USE_SUBGROUP_SHUFFLING == 1 && (SUBGROUP_SHUFFLING_NVIDIA_PRE_VOLTA || SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA)
+ #define SUBGROUP_SIZE 32 // Assumes subgroup size is always 32 on NVIDIA GPUs
+#endif
+
#if NWI != SUBGROUP_SIZE || MDIMC < SUBGROUP_SIZE
#undef USE_SUBGROUP_SHUFFLING
#define USE_SUBGROUP_SHUFFLING 0 // Disables subgroups in case the assumptions don't hold
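
Note (not part of the patch): the resulting configuration matrix, with the defines expected to be injected by the host at kernel-compile time (see src/utilities/compile.cpp below):

    // Device / condition                          Host-injected defines                                             SUBGROUP_SIZE
    // Intel GPU with cl_intel_subgroups           USE_SUBGROUP_SHUFFLING=1, SUBGROUP_SHUFFLING_INTEL=1               8
    // NVIDIA GPU, FP32, compute capability < 7    USE_SUBGROUP_SHUFFLING=1, SUBGROUP_SHUFFLING_NVIDIA_PRE_VOLTA=1   32
    // NVIDIA GPU, FP32, compute capability >= 7   USE_SUBGROUP_SHUFFLING=1, SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA=1  32
    // Anything else                               all default to 0 (shuffling disabled)                             n/a

In every case the final guard above still disables shuffling when NWI != SUBGROUP_SIZE or MDIMC < SUBGROUP_SIZE.
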
diff --git a/src/kernels/level3/xgemm_part3.opencl b/src/kernels/level3/xgemm_part3.opencl
index c3920cb5..35ec735c 100644
--- a/src/kernels/level3/xgemm_part3.opencl
+++ b/src/kernels/level3/xgemm_part3.opencl
@@ -17,6 +17,44 @@ R"(
// =================================================================================================
+// A common interface for subgroup functions
+
+#if USE_SUBGROUP_SHUFFLING == 1
+
+INLINE_FUNC int clblast_get_sub_group_local_id() {
+
+ // Intel extension
+ #if SUBGROUP_SHUFFLING_INTEL == 1
+ return get_sub_group_local_id();
+
+ // Nvidia inline PTX
+ #elif SUBGROUP_SHUFFLING_NVIDIA_PRE_VOLTA == 1 || SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA == 1
+ int ret;
+ asm volatile("mov.u32 %0, %%laneid;" : "=r"(ret) );
+ return ret;
+ #endif
+}
+
+INLINE_FUNC realN clblast_sub_group_shuffle(realN reg, int src) {
+
+ // Intel extension
+ #if SUBGROUP_SHUFFLING_INTEL == 1
+ return intel_sub_group_shuffle(reg, src);
+
+ // Nvidia inline PTX
+ // Volta and later require the .sync shuffle instructions, which take an extra member-mask argument
+ #elif SUBGROUP_SHUFFLING_NVIDIA_PRE_VOLTA == 1 || SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA == 1
+ realN ret;
+ #if SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA == 1
+ asm volatile("shfl.sync.idx.b32 %0, %1, %2, 0x1f, 0xffffffff;" : "=f"(ret): "f"(reg), "r"(src));
+ #else
+ asm volatile("shfl.idx.b32 %0, %1, %2, 0x1f;" : "=f"(ret): "f"(reg), "r"(src));
+ #endif
+ return ret;
+ #endif
+}
+#endif
+
// Main body of the matrix-multiplication algorithm. It calls various (inlined) functions.
INLINE_FUNC void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK,
const __global realM* restrict agm, const __global realN* restrict bgm,
@@ -130,7 +168,7 @@ INLINE_FUNC void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK,
#elif GEMMK == 1
// Loads data: 2D global --> 2D private (matrix A). Partly, shuffled later among subgroups
#if USE_SUBGROUP_SHUFFLING == 1
- const int _ni = get_sub_group_local_id();
+ const int _ni = clblast_get_sub_group_local_id();
#pragma unroll
for (int _ki = 0; _ki < KREG/VWN; _ki += 1) {
apm[_ki] = GlobalToPrivateA2D(a_ptr, tid_y, _ni, kSizeK, idk, _ki);
@@ -202,7 +240,7 @@ INLINE_FUNC void XgemmBody(const int kSizeM, const int kSizeN, const int kSizeK,
for (int _ki = 0; _ki < KREG/VWN; _ki += 1) {
const int index = _ni * (MWI/VWM) + _mi;
#if USE_SUBGROUP_SHUFFLING == 1
- const realN aval = intel_sub_group_shuffle(apm[_ki], _ni);
+ const realN aval = clblast_sub_group_shuffle(apm[_ki], _ni);
#else
const realN aval = apm[_ni * (KREG/VWN) + _ki];
#endif
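
Note (not part of the patch): %laneid is the PTX special register holding the calling thread's lane (0..31) within its warp, i.e. the warp-level counterpart of get_sub_group_local_id(). The shuffle wrapper corresponds to the familiar CUDA warp primitive; an illustrative CUDA-C sketch of the post-Volta path for a scalar float (the actual kernel is OpenCL C operating on realN vectors):

    __device__ float sub_group_shuffle(float reg, int src) {
      // Reads 'reg' from lane 'src' of the calling warp; the full-warp member mask 0xffffffff
      // is required on Volta and later because of independent thread scheduling.
      return __shfl_sync(0xffffffffu, reg, src);
    }

The pre-Volta shfl.idx.b32 form is the same operation without the member mask (the legacy __shfl intrinsic), which was sufficient when all lanes of a warp executed in lockstep.
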
diff --git a/src/utilities/compile.cpp b/src/utilities/compile.cpp
index 05c29944..835f54b4 100644
--- a/src/utilities/compile.cpp
+++ b/src/utilities/compile.cpp
@@ -61,8 +61,22 @@ std::shared_ptr<Program> CompileFromSource(
// For Intel GPUs with subgroup support, use subgroup shuffling.
if (device.IsGPU() && device.HasExtension(kKhronosIntelSubgroups)) {
header_string += "#define USE_SUBGROUP_SHUFFLING 1\n";
+ header_string += "#define SUBGROUP_SHUFFLING_INTEL 1\n";
}
+ // For NVIDIA GPUs, inline PTX can provide subgroup support
+ if (device.IsGPU() && device.IsNVIDIA() && precision == Precision::kSingle) {
+ header_string += "#define USE_SUBGROUP_SHUFFLING 1\n";
+
+ // Distinguishes pre- and post-Volta NVIDIA GPUs, since Volta introduced new .sync shuffle instructions
+ if (device.IsPostNVIDIAVolta()) {
+ header_string += "#define SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA 1\n";
+ }
+ else {
+ header_string += "#define SUBGROUP_SHUFFLING_NVIDIA_PRE_VOLTA 1\n";
+ }
+ }
+
// Optionally adds a translation header from OpenCL kernels to CUDA kernels
#ifdef CUDA_API
header_string +=
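
Note (not part of the patch): for a single-precision compile on a Volta-or-newer NVIDIA GPU, the kernel prologue assembled here now contains roughly

    #define USE_SUBGROUP_SHUFFLING 1
    #define SUBGROUP_SHUFFLING_NVIDIA_POST_VOLTA 1

which selects SUBGROUP_SIZE 32 in xgemm_part1.opencl and the shfl.sync.idx.b32 path in xgemm_part3.opencl.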