@@ -4476,6 +4476,13 @@ static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) {
     *dsti = __float2half(*xi);
 }
 
+static __device__ void cpy_1_f16_f16(const char * cxi, char * cdsti) {
+    const half * xi = (const half *) cxi;
+    half * dsti = (half *) cdsti;
+
+    *dsti = *xi;
+}
+
 template <cpy_kernel_t cpy_1>
 static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne,
                                    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
@@ -4729,6 +4736,25 @@ static __global__ void clamp_f32(const float * x, float * dst, const float min,
     dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]);
 }
 
+static __global__ void im2col_f32_f16(
+        const float * x, half * dst,
+        int ofs0, int ofs1, int IW, int IH, int CHW,
+        int s0, int s1, int p0, int p1, int d0, int d1) {
+    const int iiw = blockIdx.z * s0 + threadIdx.z * d0 - p0;
+    const int iih = blockIdx.y * s1 + threadIdx.y * d1 - p1;
+
+    const int offset_dst =
+        (threadIdx.x * gridDim.y * gridDim.z + blockIdx.y * gridDim.z + blockIdx.z) * CHW +
+        (blockIdx.x * (blockDim.y * blockDim.z) + threadIdx.y * blockDim.z + threadIdx.z);
+
+    if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
+        dst[offset_dst] = __float2half(0.0f);
+    } else {
+        const int offset_src = threadIdx.x * ofs0 + blockIdx.x * ofs1;
+        dst[offset_dst] = __float2half(x[offset_src + iih * IW + iiw]);
+    }
+}
+
 template <int qk, int qr, dequantize_kernel_t dq>
 static void get_rows_cuda(const void * x, const int32_t * y, float * dst, const int nrows, const int ncols, cudaStream_t stream) {
     const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1);
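Note: with the launch configuration used by im2col_f32_f16_cuda later in this diff (block_nums(IC, OH, OW), block_dims(N, KH, KW)), each thread of the kernel above writes one element of an [N*OH*OW, IC*KH*KW] destination matrix. The following is a minimal CPU sketch of the same index mapping, illustrative only and not part of the patch (im2col_f32_reference is a made-up name, and it writes f32 instead of half for simplicity):

// CPU reference for the index mapping of im2col_f32_f16, assuming the
// block_nums(IC, OH, OW) / block_dims(N, KH, KW) launch shown further down.
static void im2col_f32_reference(const float * x, float * dst,
        int N, int IC, int IH, int IW, int OH, int OW, int KH, int KW,
        int ofs0, int ofs1, int s0, int s1, int p0, int p1, int d0, int d1) {
    const int CHW = IC * KH * KW;
    for (int n  = 0; n  < N;  n++)      // threadIdx.x
    for (int oh = 0; oh < OH; oh++)     // blockIdx.y
    for (int ow = 0; ow < OW; ow++)     // blockIdx.z
    for (int ic = 0; ic < IC; ic++)     // blockIdx.x
    for (int kh = 0; kh < KH; kh++)     // threadIdx.y
    for (int kw = 0; kw < KW; kw++) {   // threadIdx.z
        // input coordinates for this (output position, kernel tap) pair
        const int iiw = ow * s0 + kw * d0 - p0;
        const int iih = oh * s1 + kh * d1 - p1;
        // row = flattened output position, column = flattened (ic, kh, kw)
        const int offset_dst = (n * OH * OW + oh * OW + ow) * CHW + (ic * KH * KW + kh * KW + kw);
        if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
            dst[offset_dst] = 0.0f;     // zero padding outside the image
        } else {
            dst[offset_dst] = x[n * ofs0 + ic * ofs1 + iih * IW + iiw];
        }
    }
}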
@@ -5618,6 +5644,16 @@ static void ggml_cpy_f32_f16_cuda(
         (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
 }
 
+static void ggml_cpy_f16_f16_cuda(
+    const char * cx, char * cdst, const int ne,
+    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
+    const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) {
+
+    const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
+    cpy_f32_f16<cpy_1_f16_f16><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>
+        (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
+}
+
 static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE;
     scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k);
@@ -5701,6 +5737,15 @@ static void soft_max_f32_cuda(const float * x, float * dst, const int ncols_x, c
     soft_max_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols_x);
 }
 
+static void im2col_f32_f16_cuda(const float * x, half * dst,
+    int OH, int IW, int IH, int OW, int IC,
+    int KH, int KW, int N, int ofs0, int ofs1,
+    int s0, int s1, int p0, int p1, int d0, int d1, cudaStream_t stream) {
+    dim3 block_nums(IC, OH, OW);
+    dim3 block_dims(N, KH, KW);
+    im2col_f32_f16<<<block_nums, block_dims, 0, stream>>>(x, dst, ofs0, ofs1, IW, IH, (IC * KH * KW), s0, s1, p0, p1, d0, d1);
+}
+
 // buffer pool for cuda
 #define MAX_CUDA_BUFFERS 256
 
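Note: as a rough illustration of the launch geometry (hypothetical values, not from the patch), a 3x3 kernel over a contiguous 64x64 single-channel f32 image with stride 1, padding 1 and dilation 1 would be dispatched with one block per (ic, oh, ow) output position and one thread per (n, kh, kw) kernel tap. Here src_f32, dst_f16 and stream are assumed to be a valid device input, a device output of 64*64*9 halves, and a CUDA stream:

// hypothetical call; OH = OW = 64 under the usual output-size formula,
// ofs0 = ofs1 = IW*IH float elements for a contiguous single-image, single-channel input
im2col_f32_f16_cuda(src_f32, dst_f16,
    /*OH=*/64, /*IW=*/64, /*IH=*/64, /*OW=*/64, /*IC=*/1,
    /*KH=*/3, /*KW=*/3, /*N=*/1, /*ofs0=*/64*64, /*ofs1=*/64*64,
    /*s0=*/1, /*s1=*/1, /*p0=*/1, /*p1=*/1, /*d0=*/1, /*d1=*/1, stream);

Since block_dims(N, KH, KW) puts N*KH*KW threads into a single block, the batch and kernel sizes are bounded by CUDA's 1024-threads-per-block limit with this layout.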
@@ -6483,7 +6528,7 @@ inline void ggml_cuda_op_mul_mat_cublas(
             src1_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &src1_as, id, stream);
             to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream);
         }
-        const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16;
+        const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16;
 
         size_t dst_f16_as = 0;
         half * dst_f16 = (half *) ggml_cuda_pool_malloc_async(row_diff*src1_ncols * sizeof(half), &dst_f16_as, id, stream);
@@ -6659,6 +6704,45 @@ inline void ggml_cuda_op_alibi(
     (void) src1_dd;
 }
 
+inline void ggml_cuda_op_im2col(
+    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+    const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F16);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F16);
+
+    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+    const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
+    const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
+    const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
+    const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
+    const int32_t d1 = ((const int32_t*)(dst->op_params))[5];
+
+    const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1;
+
+    const int64_t N  = src1->ne[is_2D ? 3 : 2];
+    const int64_t IC = src1->ne[is_2D ? 2 : 1];
+    const int64_t IH = is_2D ? src1->ne[1] : 1;
+    const int64_t IW = src1->ne[0];
+
+    const int64_t KH = is_2D ? src0->ne[1] : 1;
+    const int64_t KW = src0->ne[0];
+
+    const int64_t OH = is_2D ? dst->ne[2] : 1;
+    const int64_t OW = dst->ne[1];
+
+    const size_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32
+    const size_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
+
+    im2col_f32_f16_cuda(src1_dd, (half*) dst_dd,
+        OH, IW, IH, OW, IC, KH, KW, N,
+        ofs0, ofs1, s0, s1, p0, p1, d0, d1, main_stream);
+
+    (void) src0;
+    (void) src0_dd;
+}
+
 inline void ggml_cuda_op_diag_mask_inf(
     const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
     const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
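Note, for orientation only (not part of the patch): the OH/OW extents read from dst above are consistent with dst being laid out as [IC*KH*KW, OW, OH, N] in the 2D case, and with the usual stride/padding/dilation output-size relation:

//   OW = (IW + 2*p0 - d0*(KW - 1) - 1) / s0 + 1
//   OH = (IH + 2*p1 - d1*(KH - 1) - 1) / s1 + 1   (OH = 1 in the 1D case)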
@@ -7549,6 +7633,9 @@ static void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, gg
     } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
         ggml_cpy_f32_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
                               ne10, ne11, nb10, nb11, nb12, main_stream);
+    } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) {
+        ggml_cpy_f16_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
+                              ne10, ne11, nb10, nb11, nb12, main_stream);
     } else {
         fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__,
                 ggml_type_name(src0->type), ggml_type_name(src1->type));
@@ -7580,6 +7667,10 @@ static void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1,
     ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_alibi);
 }
 
+void ggml_cuda_im2col(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_im2col);
+}
+
 static void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     (void) src0;
     (void) src1;
@@ -7943,6 +8034,9 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_
         case GGML_OP_ALIBI:
             func = ggml_cuda_alibi;
             break;
+        case GGML_OP_IM2COL:
+            func = ggml_cuda_im2col;
+            break;
         default:
             return false;
     }