
#define GGML_CUDA_MAX_NODES 8192

- // define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication
- // on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant
- // for large computational tasks. the drawback is that this requires some extra amount of VRAM:
- // - 7B quantum model: +100-200 MB
- // - 13B quantum model: +200-400 MB
- //
- // #define GGML_CUDA_FORCE_MMQ
-
- // TODO: improve this to be correct for more hardware
- // for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores
- // probably other such cases, and not sure what happens on AMD hardware
- #if !defined(GGML_CUDA_FORCE_MMQ)
- #define CUDA_USE_TENSOR_CORES
- #endif
-
- // max batch size to use MMQ kernels when tensor cores are available
- #define MMQ_MAX_BATCH_SIZE 32
-
#if defined(GGML_USE_HIPBLAS)
#define __CUDA_ARCH__ 1300

@@ -207,6 +189,24 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
}
#endif // defined(GGML_USE_HIPBLAS)

+ // define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication
+ // on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant
+ // for large computational tasks. the drawback is that this requires some extra amount of VRAM:
+ // - 7B quantum model: +100-200 MB
+ // - 13B quantum model: +200-400 MB
+ //
+ // #define GGML_CUDA_FORCE_MMQ
+
+ // TODO: improve this to be correct for more hardware
+ // for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores
+ // probably other such cases, and not sure what happens on AMD hardware
+ #if !defined(GGML_CUDA_FORCE_MMQ) && (!defined(GGML_USE_HIPBLAS) || !defined(RDNA3))
+ #define CUDA_USE_TENSOR_CORES
+ #endif
+
+ // max batch size to use MMQ kernels when tensor cores are available
+ #define MMQ_MAX_BATCH_SIZE 32
+
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
@@ -8661,11 +8661,26 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
        }
    }

- #ifdef CUDA_USE_TENSOR_CORES
-     const bool use_tensor_cores = true;
+ #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+     const bool fp16_performance_good = true;
+
+ #ifdef RDNA3
+     const bool use_mul_mat_q = false;
#else
-     const bool use_tensor_cores = false;
- #endif
+     const bool use_mul_mat_q = true;
+ #endif // RDNA3
+
+ #else
+
+     const bool fp16_performance_good = min_compute_capability >= CC_VOLTA;
+     bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type);
+ #ifdef CUDA_USE_TENSOR_CORES
+     // when tensor cores are available, use them for large batch size
+     // ref: https://github.com/ggerganov/llama.cpp/pull/3776
+     use_mul_mat_q = use_mul_mat_q && !(fp16_performance_good && src1->ne[1] > MMQ_MAX_BATCH_SIZE);
+ #endif // CUDA_USE_TENSOR_CORES
+
+ #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)

    // debug helpers
    // printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
@@ -8675,13 +8690,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
    // printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
    // printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);

-     if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+     if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
        // KQ single-batch
        ggml_cuda_mul_mat_vec_p021(src0, src1, dst);
-     } else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
+     } else if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
        // KQV single-batch
        ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
-     } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+     } else if (!split && all_on_device && fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
        // KQ + KQV multi-batch
        ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
    } else if (src0->type == GGML_TYPE_F32) {
@@ -8701,14 +8716,6 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
                ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false);
            }
        } else {
-             bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type);
-
-             // when tensor cores are available, use them for large batch size
-             // ref: https://github.com/ggerganov/llama.cpp/pull/3776
-             if (use_tensor_cores && min_compute_capability >= CC_VOLTA && src1->ne[1] > MMQ_MAX_BATCH_SIZE) {
-                 use_mul_mat_q = false;
-             }
-
            if (use_mul_mat_q) {
                ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_q, true);
            } else {
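For readers skimming the diff: on the CUDA (non-HIP) path, the runtime choice between the quantized MMQ kernels and the cuBLAS tensor-core path can be restated outside the preprocessor blocks. The sketch below is illustrative only; choose_mul_mat_q is a hypothetical standalone helper, not a function in ggml-cuda.cu, and the assumed values of CC_VOLTA (700) and MIN_CC_DP4A (610) mirror the constants defined elsewhere in that file.

// Hypothetical restatement of the dispatch rule introduced in this change.
// Not part of ggml-cuda.cu; constants assumed to match the definitions there.
#include <cstdio>

constexpr int CC_VOLTA           = 700;  // first arch with usable F16 tensor cores
constexpr int MIN_CC_DP4A        = 610;  // minimum compute capability for __dp4a
constexpr int MMQ_MAX_BATCH_SIZE = 32;   // above this batch size, prefer cuBLAS when tensor cores exist

// Returns true if the quantized MMQ kernels should be used, false if the
// cuBLAS path is preferred. MMQ requires DP4A support and quantized weights;
// when CUDA_USE_TENSOR_CORES is compiled in and FP16 performance is good
// (>= Volta), large batches fall back to cuBLAS instead.
static bool choose_mul_mat_q(int min_compute_capability, bool src0_is_quantized,
                             long long batch_size, bool tensor_cores_compiled_in) {
    const bool fp16_performance_good = min_compute_capability >= CC_VOLTA;
    bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && src0_is_quantized;
    if (tensor_cores_compiled_in) {
        use_mul_mat_q = use_mul_mat_q && !(fp16_performance_good && batch_size > MMQ_MAX_BATCH_SIZE);
    }
    return use_mul_mat_q;
}

int main() {
    // Example: a CC 8.6 GPU with quantized weights.
    // A batch of 512 tokens skips MMQ in favor of cuBLAS; a batch of 8 keeps MMQ.
    printf("batch 512: use_mul_mat_q = %d\n", choose_mul_mat_q(860, true, 512, true));
    printf("batch 8:   use_mul_mat_q = %d\n", choose_mul_mat_q(860, true, 8,   true));
    return 0;
}

On the HIP/ROCm path the same decision collapses to compile-time constants, as the preprocessor branches in the diff show: fp16_performance_good is always true, RDNA3 prefers the BLAS path, and older AMD architectures keep MMQ.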