@@ -7414,6 +7414,8 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
         (src1->backend == GGML_BACKEND_GPU) &&
         ( dst->backend == GGML_BACKEND_GPU);
 
+    const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT;
+
     int64_t min_compute_capability = INT_MAX;
     for (int64_t id = 0; id < g_device_count; ++id) {
         if (min_compute_capability > g_compute_capabilities[id] && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
@@ -7435,13 +7437,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     // printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
     // printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
 
-    if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+    if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         // KQ single-batch
         ggml_cuda_mul_mat_vec_p021(src0, src1, dst);
-    } else if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
+    } else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
         ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
-    } else if (all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+    } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
         // KQ + KQV multi-batch
         ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
     } else if (src0->type == GGML_TYPE_F32) {
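
Taken together, the edits above gate all three specialized fast paths (the two mul-mat-vec kernels and the batched cuBLAS path) behind !split, so a src0 tensor that is row-split across multiple GPUs always falls through to the generic multi-device path instead of being handed to a kernel that assumes src0 is fully resident on one device. Below is a minimal, compilable C sketch of that dispatch pattern; the enum, struct, and function names are illustrative stand-ins, not the actual ggml-cuda API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the ggml backend enum and tensor struct;
   names echo the diff but are illustrative, not the real ggml-cuda types. */
typedef enum { BACKEND_CPU, BACKEND_GPU, BACKEND_GPU_SPLIT } backend_t;
typedef struct { backend_t backend; } tensor_t;

/* Dispatch sketch: specialized single-device kernels are only eligible
   when src0 is not row-split across GPUs, mirroring the !split guards. */
static void mul_mat_dispatch(const tensor_t * src0, bool all_on_device) {
    const bool split = src0->backend == BACKEND_GPU_SPLIT;
    if (!split && all_on_device /* && kernel-specific shape/type checks */) {
        printf("fast path: single-device specialized kernel\n");
    } else {
        printf("generic path: supports row-split src0\n");
    }
}

int main(void) {
    tensor_t on_gpu     = { BACKEND_GPU };
    tensor_t split_rows = { BACKEND_GPU_SPLIT };
    mul_mat_dispatch(&on_gpu, true);     /* takes the fast path */
    mul_mat_dispatch(&split_rows, true); /* forced onto the generic path */
    return 0;
}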