@@ -555,8 +555,8 @@ static enum ggml_status ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer
 
     if (ggml_is_quantized(tensor->type) && tensor->view_src == nullptr && ggml_backend_buffer_get_usage(buffer) != GGML_BACKEND_BUFFER_USAGE_COMPUTE) {
         // initialize padding to 0 to avoid possible NaN values
-        size_t original_size = ggml_nbytes(tensor);
-        size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor);
+        const size_t original_size = ggml_nbytes(tensor);
+        const size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor);
 
         if (padded_size > original_size) {
             ggml_cuda_set_device(ctx->device);
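The hunk is truncated right after `ggml_cuda_set_device(ctx->device);`. As a minimal sketch of what this branch is about, assuming the padding is cleared with a plain `cudaMemset` (the exact call used by the backend may differ and `clear_padding_sketch` is a hypothetical helper, not part of the patch):

```cpp
// Sketch only: zero the bytes between the tensor's real size and its padded
// allocation so quantized kernels never read uninitialized (possibly NaN) data.
#include <cuda_runtime.h>
#include <cstddef>

static cudaError_t clear_padding_sketch(void * data, size_t original_size, size_t padded_size) {
    if (padded_size <= original_size) {
        return cudaSuccess; // nothing to clear
    }
    // clear only the trailing padding region, not the tensor payload itself
    return cudaMemset((char *) data + original_size, 0, padded_size - original_size);
}
```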
@@ -679,6 +679,7 @@ static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_t
 
     if (ggml_is_quantized(tensor->type)) {
         if (ne0 % MATRIX_ROW_PADDING != 0) {
+            GGML_ASSERT(tensor->nb[0] == ggml_element_size(tensor));
             size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
         }
     }
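This hunk grows the reported allocation size by `ggml_row_size(type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING)` bytes whenever the row length is not already aligned, and the new assertion requires element-sized strides in dimension 0 so that those extra bytes actually sit at the end of a densely packed row. A standalone illustration of the round-up arithmetic (not ggml API; the value of `MATRIX_ROW_PADDING` here is an assumption for the example):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t MATRIX_ROW_PADDING = 512; // assumed padding granularity
    const int64_t ne0 = 1000;               // hypothetical row length in elements

    const int64_t rem = ne0 % MATRIX_ROW_PADDING;
    const int64_t pad = rem == 0 ? 0 : MATRIX_ROW_PADDING - rem;

    // the allocation above grows by the byte size of `pad` extra elements
    std::printf("ne0 = %lld -> padded row length = %lld (+%lld elements)\n",
                (long long) ne0, (long long) (ne0 + pad), (long long) pad);
    return 0;
}
```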
@@ -800,6 +801,7 @@ static void * ggml_backend_cuda_split_buffer_get_base(ggml_backend_buffer_t buff
 
 static enum ggml_status ggml_backend_cuda_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
     GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported
+    GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors");
 
     ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context;
     ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
@@ -851,6 +853,7 @@ static void ggml_backend_cuda_split_buffer_set_tensor(ggml_backend_buffer_t buff
     // split tensors must always be set in their entirety at once
     GGML_ASSERT(offset == 0);
     GGML_ASSERT(size == ggml_nbytes(tensor));
+    GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors");
 
     ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
 
@@ -889,6 +892,7 @@ static void ggml_backend_cuda_split_buffer_get_tensor(ggml_backend_buffer_t buff
     // split tensors must always be set in their entirety at once
     GGML_ASSERT(offset == 0);
     GGML_ASSERT(size == ggml_nbytes(tensor));
+    GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors");
 
     ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
 
@@ -970,6 +974,7 @@ static size_t ggml_backend_cuda_split_buffer_type_get_alignment(ggml_backend_buf
 
 static size_t ggml_backend_cuda_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
     ggml_backend_cuda_split_buffer_type_context * ctx = (ggml_backend_cuda_split_buffer_type_context *)buft->context;
+    GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors");
 
     size_t total_size = 0;
 
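The same `ggml_is_contiguous` assertion is added in all four split-buffer entry points above (init_tensor, set_tensor, get_tensor, get_alloc_size). A simplified sketch of what contiguity requires, assuming a plain non-quantized element layout (ggml's real check also accounts for block-quantized types, and `tensor_sketch` is a hypothetical stand-in for `ggml_tensor`):

```cpp
#include <cstddef>
#include <cstdint>

struct tensor_sketch {
    int64_t ne[4];          // number of elements per dimension
    size_t  nb[4];          // stride in bytes per dimension
    size_t  element_size;   // bytes per element
};

static bool is_contiguous_sketch(const tensor_sketch & t) {
    if (t.nb[0] != t.element_size) {
        return false;       // rows must be densely packed
    }
    for (int i = 1; i < 4; ++i) {
        if (t.nb[i] != t.nb[i - 1] * (size_t) t.ne[i - 1]) {
            return false;   // each higher dimension must follow the previous one without gaps
        }
    }
    return true;
}
```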
@@ -2065,6 +2070,7 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor *
         src0_slice.ne[2] = 1;
         src0_slice.nb[3] = src0_slice.nb[2];
         src0_slice.data = (char *) src0->data + i02*nb02;
+        GGML_ASSERT(!ggml_cuda_should_use_mmq(src0->type, cc, ne11) || ne00 % MATRIX_ROW_PADDING == 0);
 
         ggml_tensor src1_slice;
         memset(&src1_slice, 0, sizeof(src1_slice));
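The assertion added in `ggml_cuda_mul_mat_id` encodes an implication: if MMQ would be selected for this slice, then `ne00` must already be a multiple of `MATRIX_ROW_PADDING`. A tiny sketch of the same `!A || B` pattern with a standard `assert`, using hypothetical parameter names:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helper illustrating the "A implies B" assertion style used above.
static void check_mmq_precondition(bool would_use_mmq, int64_t ne00, int64_t row_padding) {
    // "would_use_mmq implies ne00 is aligned" is written as !A || B
    assert(!would_use_mmq || ne00 % row_padding == 0);
    (void) would_use_mmq; (void) ne00; (void) row_padding; // keep parameters used when NDEBUG is set
}
```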