@@ -6385,19 +6385,27 @@ inline void ggml_cuda_op_mul_mat_cublas(
         }
 
         const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16;
 
-        const float alpha = 1.0f;
-        const float beta = 0.0f;
+        size_t dst_as = 0;
+        half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as);
+
+        const half alpha = 1.0f;
+        const half beta = 0.0f;
 
         CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
         CUBLAS_CHECK(
             cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
                     row_diff, src1_ncols, ne10,
                     &alpha, src0_ptr, CUDA_R_16F, ne00,
                             src1_ptr, CUDA_R_16F, ne10,
-                    &beta,  dst_dd_i, CUDA_R_32F, ldc,
-                    CUBLAS_COMPUTE_32F,
+                    &beta,  dst_f16,  CUDA_R_16F, ldc,
+                    CUBLAS_COMPUTE_16F,
                     CUBLAS_GEMM_DEFAULT_TENSOR_OP));
 
+        const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
+        to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream);
+
+        ggml_cuda_pool_free(dst_f16, dst_as);
+
         if (src0_as != 0) {
             ggml_cuda_pool_free(src0_as_f16, src0_as);
         }
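For context, the hunk switches the cuBLAS matmul from FP32 accumulation into dst_dd_i to FP16 accumulation into a pooled half buffer (dst_f16), then converts that buffer back to FP32 on the same stream. Below is a minimal standalone sketch of the same pattern, assuming plain cudaMalloc and a hand-written conversion kernel instead of the ggml pool allocator and to_fp32_cuda; the buffer names, matrix sizes, and the half_to_float kernel are illustrative, not part of the patch, and error checking is omitted for brevity.

// build: nvcc -o gemm_fp16 gemm_fp16.cu -lcublas
#include <cublas_v2.h>
#include <cuda_fp16.h>

// stand-in for to_fp32_cuda: convert an FP16 buffer to FP32 on the device
static __global__ void half_to_float(const half * x, float * y, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] = __half2float(x[i]);
    }
}

int main() {
    const int m = 64, n = 32, k = 128;

    cublasHandle_t handle;
    cublasCreate(&handle);

    half  * dA, * dB, * dC;   // FP16 operands and FP16 GEMM result
    float * dC_f32;           // final FP32 output
    cudaMalloc(&dA, m * k * sizeof(half));
    cudaMalloc(&dB, k * n * sizeof(half));
    cudaMalloc(&dC, m * n * sizeof(half));
    cudaMalloc(&dC_f32, m * n * sizeof(float));
    // (operand initialization omitted for brevity)

    // with CUBLAS_COMPUTE_16F, alpha and beta must be half, matching the patch
    const half alpha = __float2half(1.0f);
    const half beta  = __float2half(0.0f);

    // C = A^T * B with all operands in FP16 and FP16 accumulation,
    // eligible for tensor cores on Volta and newer
    cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N,
                 m, n, k,
                 &alpha, dA, CUDA_R_16F, k,
                         dB, CUDA_R_16F, k,
                 &beta,  dC, CUDA_R_16F, m,
                 CUBLAS_COMPUTE_16F,
                 CUBLAS_GEMM_DEFAULT_TENSOR_OP);

    // convert the FP16 result back to FP32, as the patch does after the GEMM
    half_to_float<<<(m * n + 255) / 256, 256>>>(dC, dC_f32, m * n);
    cudaDeviceSynchronize();

    cudaFree(dA); cudaFree(dB); cudaFree(dC); cudaFree(dC_f32);
    cublasDestroy(handle);
    return 0;
}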