
Commit 1ef8e50

JohannesGaessler authored and hodlen committed
CUDA: fix tensor core logic for Pascal and HIP (ggml-org#4682)
1 parent 44624d6 commit 1ef8e50

File tree

1 file changed: +39 -33 lines changed


ggml-cuda.cu

Lines changed: 39 additions & 33 deletions
@@ -123,24 +123,6 @@
 
 #define GGML_CUDA_MAX_NODES 8192
 
-// define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication
-// on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant
-// for large computational tasks. the drawback is that this requires some extra amount of VRAM:
-// - 7B quantum model: +100-200 MB
-// - 13B quantum model: +200-400 MB
-//
-//#define GGML_CUDA_FORCE_MMQ
-
-// TODO: improve this to be correct for more hardware
-// for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores
-// probably other such cases, and not sure what happens on AMD hardware
-#if !defined(GGML_CUDA_FORCE_MMQ)
-#define CUDA_USE_TENSOR_CORES
-#endif
-
-// max batch size to use MMQ kernels when tensor cores are available
-#define MMQ_MAX_BATCH_SIZE 32
-
 #if defined(GGML_USE_HIPBLAS)
 #define __CUDA_ARCH__ 1300
 
@@ -207,6 +189,23 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
 }
 #endif // defined(GGML_USE_HIPBLAS)
 
+// define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication
+// on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant
+// for large computational tasks. the drawback is that this requires some extra amount of VRAM:
+// - 7B quantum model: +100-200 MB
+// - 13B quantum model: +200-400 MB
+//
+//#define GGML_CUDA_FORCE_MMQ
+
+// TODO: improve this to be correct for more hardware
+// for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores
+#if !defined(GGML_CUDA_FORCE_MMQ) && (!defined(GGML_USE_HIPBLAS) || defined(RDNA3))
+#define CUDA_USE_TENSOR_CORES
+#endif
+
+// max batch size to use MMQ kernels when tensor cores are available
+#define MMQ_MAX_BATCH_SIZE 32
+
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
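
The hunk above moves the CUDA_USE_TENSOR_CORES gate below the HIP compatibility layer and extends it so that HIP builds only enable it on RDNA3. Before the runtime changes in ggml_cuda_mul_mat below, here is a minimal standalone sketch (not part of the commit; it only mirrors the gate from the diff) that can be compiled with different -D flags, e.g. -DGGML_USE_HIPBLAS, -DRDNA3 or -DGGML_CUDA_FORCE_MMQ, to check how the gate resolves:

// Standalone sketch: the same preprocessor gate as in the diff, wrapped in a tiny
// program so the effect of the build flags can be checked in isolation.
#include <cstdio>

#if !defined(GGML_CUDA_FORCE_MMQ) && (!defined(GGML_USE_HIPBLAS) || defined(RDNA3))
#define CUDA_USE_TENSOR_CORES
#endif

int main() {
#ifdef CUDA_USE_TENSOR_CORES
    std::printf("CUDA_USE_TENSOR_CORES defined: large quantized batches may be handed to cuBLAS\n");
#else
    std::printf("CUDA_USE_TENSOR_CORES not defined: quantized mul_mat stays on the MMQ kernels\n");
#endif
    return 0;
}
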
@@ -8661,11 +8660,26 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
         }
     }
 
-#ifdef CUDA_USE_TENSOR_CORES
-    const bool use_tensor_cores = true;
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+    const bool fp16_performance_good = true;
+
+#ifdef RDNA3
+    const bool use_mul_mat_q = false;
 #else
-    const bool use_tensor_cores = false;
-#endif
+    const bool use_mul_mat_q = true;
+#endif // RDNA3
+
+#else
+
+    const bool fp16_performance_good = min_compute_capability >= CC_VOLTA;
+    bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type);
+#ifdef CUDA_USE_TENSOR_CORES
+    // when tensor cores are available, use them for large batch size
+    // ref: https://github.com/ggerganov/llama.cpp/pull/3776
+    use_mul_mat_q = use_mul_mat_q && !(fp16_performance_good && src1->ne[1] > MMQ_MAX_BATCH_SIZE);
+#endif // CUDA_USE_TENSOR_CORES
+
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
 
     // debug helpers
     //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
@@ -8675,13 +8689,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
     //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
 
-    if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+    if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         // KQ single-batch
         ggml_cuda_mul_mat_vec_p021(src0, src1, dst);
-    } else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
+    } else if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
         ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
-    } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+    } else if (!split && all_on_device && fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
         // KQ + KQV multi-batch
         ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
     } else if (src0->type == GGML_TYPE_F32) {
@@ -8701,14 +8715,6 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
                 ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false);
             }
         } else {
-            bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type);
-
-            // when tensor cores are available, use them for large batch size
-            // ref: https://github.com/ggerganov/llama.cpp/pull/3776
-            if (use_tensor_cores && min_compute_capability >= CC_VOLTA && src1->ne[1] > MMQ_MAX_BATCH_SIZE) {
-                use_mul_mat_q = false;
-            }
-
             if (use_mul_mat_q) {
                 ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_q, true);
             } else {
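
At run time, the CUDA (non-HIP) branch introduced above decides per matrix multiplication whether to stay on the MMQ kernels or hand large batches to cuBLAS. A condensed sketch of that decision follows; should_use_mul_mat_q() is a hypothetical helper, not a function in ggml-cuda.cu, and the constant values are assumed to match the definitions elsewhere in the file:

// Condensed sketch of the runtime decision on the CUDA (non-HIP) path.
#include <cstdint>
#include <cstdio>

constexpr int CC_VOLTA           = 700; // assumed: compute capability of Volta
constexpr int MIN_CC_DP4A        = 610; // assumed: minimum compute capability for __dp4a
constexpr int MMQ_MAX_BATCH_SIZE = 32;  // max batch size for MMQ when tensor cores are available

// Returns true if the quantized MMQ kernels should be used instead of cuBLAS.
static bool should_use_mul_mat_q(int min_compute_capability, bool src0_is_quantized, int64_t batch_size) {
    const bool fp16_performance_good = min_compute_capability >= CC_VOLTA;
    bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && src0_is_quantized;
#ifdef CUDA_USE_TENSOR_CORES
    // with tensor cores, prefer cuBLAS for batches larger than MMQ_MAX_BATCH_SIZE
    // (ref: https://github.com/ggerganov/llama.cpp/pull/3776)
    use_mul_mat_q = use_mul_mat_q && !(fp16_performance_good && batch_size > MMQ_MAX_BATCH_SIZE);
#endif // CUDA_USE_TENSOR_CORES
    return use_mul_mat_q;
}

int main() {
    // Pascal (6.1), quantized weights, batch of 512: fp16 performance is not
    // considered good, so MMQ stays enabled even for the large batch.
    std::printf("Pascal, batch 512: use MMQ = %d\n", should_use_mul_mat_q(610, true, 512));
    // Ampere (8.6), same case: if CUDA_USE_TENSOR_CORES is defined at compile time,
    // batches above MMQ_MAX_BATCH_SIZE are handed to cuBLAS (the helper returns false).
    std::printf("Ampere, batch 512: use MMQ = %d\n", should_use_mul_mat_q(860, true, 512));
    return 0;
}
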
