Skip to content

Commit a40f611

Browse files
committed
ggml : force F32 precision for ggml_mul_mat
1 parent a7aee47 commit a40f611

File tree

2 files changed

+47
-20
lines changed

2 files changed

+47
-20
lines changed

ggml-cuda.cu

Lines changed: 41 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -7389,7 +7389,7 @@ inline void ggml_cuda_op_mul_mat_cublas(
73897389

73907390
const int compute_capability = g_compute_capabilities[id];
73917391

7392-
if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) {
7392+
if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) {
73937393
// convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32
73947394
half * src0_as_f16 = nullptr;
73957395
size_t src0_as = 0;
@@ -7412,26 +7412,47 @@ inline void ggml_cuda_op_mul_mat_cublas(
74127412
to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream);
74137413
}
74147414
const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16;
7415-
size_t dst_as = 0;
7416-
half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as);
74177415

7418-
const half alpha_f16 = 1.0f;
7419-
const half beta_f16 = 0.0f;
7420-
7421-
CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
7422-
CUBLAS_CHECK(
7423-
cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
7424-
row_diff, src1_ncols, ne10,
7425-
&alpha_f16, src0_ptr, CUDA_R_16F, ne00,
7426-
src1_ptr, CUDA_R_16F, ne10,
7427-
&beta_f16, dst_f16, CUDA_R_16F, ldc,
7428-
CUBLAS_COMPUTE_16F,
7429-
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
7430-
7431-
const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
7432-
to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream);
7433-
7434-
ggml_cuda_pool_free(dst_f16, dst_as);
7416+
switch (dst->op_params[0]) {
7417+
case GGML_PREC_DEFAULT:
7418+
{
7419+
size_t dst_as = 0;
7420+
half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as);
7421+
7422+
const half alpha_f16 = 1.0f;
7423+
const half beta_f16 = 0.0f;
7424+
7425+
CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
7426+
CUBLAS_CHECK(
7427+
cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
7428+
row_diff, src1_ncols, ne10,
7429+
&alpha_f16, src0_ptr, CUDA_R_16F, ne00,
7430+
src1_ptr, CUDA_R_16F, ne10,
7431+
&beta_f16, dst_f16, CUDA_R_16F, ldc,
7432+
CUBLAS_COMPUTE_16F,
7433+
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
7434+
7435+
const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
7436+
to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream);
7437+
7438+
ggml_cuda_pool_free(dst_f16, dst_as);
7439+
} break;
7440+
case GGML_PREC_F32:
7441+
{
7442+
const float alpha_f32 = 1.0f;
7443+
const float beta_f32 = 0.0f;
7444+
7445+
CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
7446+
CUBLAS_CHECK(
7447+
cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
7448+
row_diff, src1_ncols, ne10,
7449+
&alpha_f32, src0_ptr, CUDA_R_16F, ne00,
7450+
src1_ptr, CUDA_R_16F, ne10,
7451+
&beta_f32, dst_dd_i, CUDA_R_32F, ldc,
7452+
CUBLAS_COMPUTE_32F,
7453+
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
7454+
} break;
7455+
}
74357456

74367457
if (src0_as != 0) {
74377458
ggml_cuda_pool_free(src0_as_f16, src0_as);

ggml.c

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -4090,6 +4090,12 @@ struct ggml_tensor * ggml_mul_mat(
40904090
const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
40914091
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
40924092

4093+
// TMP: force f32 precision
4094+
{
4095+
const int32_t prec_i32 = GGML_PREC_F32;
4096+
ggml_set_op_params_i32(result, 0, prec_i32);
4097+
}
4098+
40934099
result->op = GGML_OP_MUL_MAT;
40944100
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
40954101
result->src[0] = a;

0 commit comments

Comments (0)