@@ -7579,8 +7579,7 @@ static void ggml_cuda_op_mul_mat_cublas(
 
     const int compute_capability = g_device_caps[id].cc;
 
-    if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) {
-        // printf("this branch\n");
+    if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) {
         // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32
         cuda_pool_alloc<half> src0_as_f16;
         if (src0->type != GGML_TYPE_F16) {
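Note that this first hunk only relaxes the branch condition: the dst->op_params[0] precision value that used to gate the whole fp16 branch is now dispatched on inside it (see the switch in the next hunk). For orientation, here is a minimal sketch of how a caller would request FP32 precision for a matmul result; it assumes the ggml_mul_mat_set_prec helper from ggml.h, and the wrapper name is hypothetical:

#include "ggml.h"

// Sketch only: ggml_mul_mat_set_prec() stores the precision enum in
// c->op_params[0], which the CUDA branch above reads back.
static struct ggml_tensor * mul_mat_f32_prec(struct ggml_context * ctx,
                                             struct ggml_tensor * a,
                                             struct ggml_tensor * b) {
    struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);
    ggml_mul_mat_set_prec(c, GGML_PREC_F32);
    return c;
}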
@@ -7601,23 +7600,44 @@ static void ggml_cuda_op_mul_mat_cublas(
             to_fp16_cuda(src1_ddf_i, src1_as_f16.get(), ne, stream);
         }
         const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16.get();
-        cuda_pool_alloc<half> dst_f16(row_diff*src1_ncols);
 
-        const half alpha_f16 = 1.0f;
-        const half beta_f16 = 0.0f;
-
-        CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
-        CUBLAS_CHECK(
-            cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
-                    row_diff, src1_ncols, ne10,
-                    &alpha_f16, src0_ptr,      CUDA_R_16F, ne00,
-                                src1_ptr,      CUDA_R_16F, ne10,
-                    &beta_f16,  dst_f16.get(), CUDA_R_16F, ldc,
-                    CUBLAS_COMPUTE_16F,
-                    CUBLAS_GEMM_DEFAULT_TENSOR_OP));
-
-        const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
-        to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream);
+        switch (dst->op_params[0]) {
+            case GGML_PREC_DEFAULT:
+                {
+                    cuda_pool_alloc<half> dst_f16(row_diff*src1_ncols);
+
+                    const half alpha_f16 = 1.0f;
+                    const half beta_f16 = 0.0f;
+
+                    CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
+                    CUBLAS_CHECK(
+                        cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
+                                row_diff, src1_ncols, ne10,
+                                &alpha_f16, src0_ptr,      CUDA_R_16F, ne00,
+                                            src1_ptr,      CUDA_R_16F, ne10,
+                                &beta_f16,  dst_f16.get(), CUDA_R_16F, ldc,
+                                CUBLAS_COMPUTE_16F,
+                                CUBLAS_GEMM_DEFAULT_TENSOR_OP));
+
+                    const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
+                    to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream);
+                } break;
+            case GGML_PREC_F32:
+                {
+                    const float alpha_f32 = 1.0f;
+                    const float beta_f32 = 0.0f;
+
+                    CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
+                    CUBLAS_CHECK(
+                        cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
+                                row_diff, src1_ncols, ne10,
+                                &alpha_f32, src0_ptr, CUDA_R_16F, ne00,
+                                            src1_ptr, CUDA_R_16F, ne10,
+                                &beta_f32,  dst_dd_i, CUDA_R_32F, ldc,
+                                CUBLAS_COMPUTE_32F,
+                                CUBLAS_GEMM_DEFAULT_TENSOR_OP));
+                } break;
+        }
     } else {
         cuda_pool_alloc<float> src0_ddq_as_f32;
         cuda_pool_alloc<float> src1_ddq_as_f32;
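The switch above is the core of the change. GGML_PREC_DEFAULT keeps the old all-FP16 GEMM (FP16 output buffer plus a trailing to_fp32_cuda conversion), while GGML_PREC_F32 feeds the same FP16 inputs to cuBLAS but accumulates in FP32 and writes the FP32 result straight into dst_dd_i, so the intermediate dst_f16 buffer and the conversion kernel disappear. A self-contained sketch of that second call pattern, assuming CUDA 11+ cuBLAS for CUBLAS_COMPUTE_32F; the helper name and assert-based error handling are illustrative, not from this patch:

#include <assert.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>

// FP16 inputs, FP32 accumulation and output - the GGML_PREC_F32 path.
// A is k x m (transposed via CUBLAS_OP_T), B is k x n, C is m x n,
// all column-major device buffers.
static void gemm_f16_in_f32_out(cublasHandle_t handle, int m, int n, int k,
                                const half * A, const half * B, float * C) {
    const float alpha = 1.0f;
    const float beta  = 0.0f;
    // alpha/beta are float because the compute type is CUBLAS_COMPUTE_32F
    cublasStatus_t st = cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N,
            m, n, k,
            &alpha, A, CUDA_R_16F, k,
                    B, CUDA_R_16F, k,
            &beta,  C, CUDA_R_32F, m,
            CUBLAS_COMPUTE_32F,
            CUBLAS_GEMM_DEFAULT_TENSOR_OP);
    assert(st == CUBLAS_STATUS_SUCCESS);
}

The trade-off is bandwidth for accuracy: the FP32 path writes four-byte outputs and uses an FP32 accumulator, in exchange for avoiding the overflow and rounding issues of FP16 accumulation over long dot products.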
@@ -7635,7 +7655,7 @@ static void ggml_cuda_op_mul_mat_cublas(
             to_fp32_cuda(src1_ddf_i, src1_ddq_as_f32.get(), src1_ncols*ne10, stream);
         }
 
-        const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get();
+        const float * src0_ddf_i  = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i  : src0_ddq_as_f32.get();
         const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? (const float *) src1_ddf_i : src1_ddq_as_f32.get();
 
         const float alpha = 1.0f;
@@ -9234,6 +9254,20 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
 }
 
 void ggml_cuda_free_data(struct ggml_tensor * tensor) {
+    // print current mem usage using cudaMemGetInfo
+    // TODO: this is a hack - need better solution
+    {
+        size_t free;
+        size_t total;
+        CUDA_CHECK(cudaMemGetInfo(&free, &total));
+
+        static size_t used = 0;
+        if (used < total - free) {
+            printf("CUDA: used %zu MB, free %zu MB\n", (total - free)/1024/1024, free/1024/1024);
+            used = total - free;
+        }
+    }
+
     if (!tensor || !tensor->extra || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT)) {
         return;
     }
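The free-data hook above reports a device-wide high-water mark: cudaMemGetInfo() returns free and total memory for the whole device, so the printed "used" figure includes allocations from other processes, which is part of why the comment calls it a hack. A minimal sketch of the same pattern factored into a standalone helper; the function name is hypothetical and it returns silently on error instead of using CUDA_CHECK, to keep the diagnostic best-effort:

#include <stdio.h>
#include <cuda_runtime.h>

// Print device memory usage whenever it reaches a new maximum.
static void cuda_report_mem_high_water(void) {
    size_t free_b  = 0;
    size_t total_b = 0;
    if (cudaMemGetInfo(&free_b, &total_b) != cudaSuccess) {
        return; // best-effort diagnostic: ignore failures
    }
    static size_t used_max = 0;            // high-water mark across calls
    const size_t used = total_b - free_b;  // device-wide, not per-process
    if (used > used_max) {
        printf("CUDA: used %zu MB, free %zu MB\n", used/1024/1024, free_b/1024/1024);
        used_max = used;
    }
}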