Skip to content

Commit a06f7ec

Browse files
Fewer iters, more ops per iter
1 parent 637483b commit a06f7ec

File tree

3 files changed

+68
-76
lines changed

3 files changed

+68
-76
lines changed

CMakeLists.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -68,8 +68,8 @@ option(LLAMA_ACCELERATE "llama: enable Accelerate framework"
6868
option(LLAMA_BLAS "llama: use BLAS" OFF)
6969
option(LLAMA_BLAS_VENDOR "llama: BLA_VENDOR from https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" Generic)
7070
option(LLAMA_CUBLAS "llama: use cuBLAS" OFF)
71+
set(LLAMA_CUDA_BX "32" CACHE STRING "llama: x block size for dmmv CUDA kernels")
7172
set(LLAMA_CUDA_BY "1" CACHE STRING "llama: y block size for dmmv CUDA kernels")
72-
option(LLAMA_CUDA_UNROLL "llama: unroll loops in dmmv CUDA kernels" OFF)
7373
option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
7474

7575
option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
@@ -185,8 +185,8 @@ if (LLAMA_CUBLAS)
185185
set(GGML_CUDA_SOURCES ggml-cuda.cu ggml-cuda.h)
186186

187187
add_compile_definitions(GGML_USE_CUBLAS)
188+
add_compile_definitions(GGML_CUDA_DMMV_BLOCK_X=${LLAMA_CUDA_BX})
188189
add_compile_definitions(GGML_CUDA_DMMV_BLOCK_Y=${LLAMA_CUDA_BY})
189-
add_compile_definitions(GGML_CUDA_UNROLL=${LLAMA_CUDA_UNROLL})
190190

191191
if (LLAMA_STATIC)
192192
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)

Makefile

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -133,14 +133,16 @@ ifdef LLAMA_CUBLAS
133133
OBJS += ggml-cuda.o
134134
NVCC = nvcc
135135
NVCCFLAGS = --forward-unknown-to-host-compiler -arch=native
136+
ifdef LLAMA_CUDA_BX
137+
NVCCFLAGS += -DGGML_CUDA_DMMV_BLOCK_X=$(LLAMA_CUDA_BX)
138+
else
139+
NVCCFLAGS += -DGGML_CUDA_DMMV_BLOCK_X=32
140+
endif # LLAMA_CUDA_BX
136141
ifdef LLAMA_CUDA_BY
137142
NVCCFLAGS += -DGGML_CUDA_DMMV_BLOCK_Y=$(LLAMA_CUDA_BY)
138143
else
139144
NVCCFLAGS += -DGGML_CUDA_DMMV_BLOCK_Y=1
140145
endif # LLAMA_CUDA_BY
141-
ifdef LLAMA_CUDA_UNROLL
142-
NVCCFLAGS += -DGGML_CUDA_UNROLL=$(LLAMA_CUDA_UNROLL)
143-
endif # LLAMA_CUDA_UNROLL
144146
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
145147
$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
146148
endif # LLAMA_CUBLAS

ggml-cuda.cu

Lines changed: 61 additions & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -83,10 +83,16 @@ typedef struct {
8383
} block_q8_0;
8484
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
8585

86+
#define WARP_SIZE 32
87+
8688
#define CUDA_MUL_BLOCK_SIZE 256
89+
8790
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
91+
8892
// dmmv = dequantize_mul_mat_vec
89-
#define GGML_CUDA_DMMV_BLOCK_X 32
93+
#ifndef GGML_CUDA_DMMV_BLOCK_X
94+
#define GGML_CUDA_DMMV_BLOCK_X 32 // can be set by compiler option LLAMA_CUDA_BX
95+
#endif
9096
#ifndef GGML_CUDA_DMMV_BLOCK_Y
9197
#define GGML_CUDA_DMMV_BLOCK_Y 1 // can be set by compiler option LLAMA_CUDA_BY
9298
#endif
@@ -204,32 +210,40 @@ static __global__ void dequantize_block(const void * vx, float * y, const int k)
204210
dequantize_kernel(vx, ib, iqs, v0, v1);
205211
}
206212

207-
template <int ncols, int block_size, int qk, int qr, dequantize_kernel_t dequantize_kernel>
208-
static __global__ void dequantize_mul_mat_vec(const void * vx, const float * y, float * dst) {
213+
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
214+
static __global__ void dequantize_mul_mat_vec(const void * vx, const float * y, float * dst, const int ncols) {
215+
// qk = quantized weights per x block
216+
// qr = number of quantized weights per data value in x block
209217
const int row = blockIdx.x*blockDim.y + threadIdx.y;
210218
const int tid = threadIdx.x;
211219

220+
const int iter_stride = 2*GGML_CUDA_DMMV_BLOCK_X;
221+
const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
212222
const int y_offset = qr == 1 ? 1 : qk/2;
213223

214-
215224
float tmp = 0; // partial sum for thread in warp
216225

217-
#ifdef GGML_CUDA_UNROLL
218-
#pragma unroll
219-
#endif
220-
for (int i = 0; i < ncols/block_size; i += 2) {
221-
const int col = i*block_size + 2*tid;
222-
const int ib = (row*ncols + col)/qk; // block index
223-
const int iqs = (col%qk)/qr; // quant index
226+
for (int i = 0; i < ncols; i += iter_stride) {
227+
const int col = i + vals_per_iter*tid;
228+
const int ib = (row*ncols + col)/qk; // x block index
229+
const int iqs = (col%qk)/qr; // x quant index
224230
const int iybs = col - col%qk; // y block start index
225231

226-
// dequantize
227-
float v0, v1;
228-
dequantize_kernel(vx, ib, iqs, v0, v1);
229-
230-
// matrix multiplication
231-
tmp += v0 * y[iybs + iqs + 0];
232-
tmp += v1 * y[iybs + iqs + y_offset];
232+
// processing >2 values per i iter is faster for fast GPUs
233+
#pragma unroll
234+
for (int j = 0; j < vals_per_iter; j += 2) {
235+
// process 2 vals per j iter
236+
237+
// dequantize
238+
float v0, v1;
239+
dequantize_kernel(vx, ib, iqs + j/qr, v0, v1);
240+
// for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
241+
242+
// matrix multiplication
243+
tmp += v0 * y[iybs + iqs + j/qr + 0];
244+
tmp += v1 * y[iybs + iqs + j/qr + y_offset];
245+
// for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
246+
}
233247
}
234248

235249
// sum up partial sums and write back result
@@ -274,72 +288,44 @@ static void dequantize_row_q8_0_cuda(const void * vx, float * y, const int k, cu
274288
dequantize_block<QK8_0, QR8_0, dequantize_q8_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
275289
}
276290

277-
template<dequantize_kernel_t dequantize_kernel, int qk, int qr>
278-
static void dequantize_mul_mat_vec_cuda(const void * vx, const float * y, float * dst,
279-
const int ncols, const int nrows, cudaStream_t stream) {
291+
static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
280292
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
281293
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
282-
const dim3 block_dims(GGML_CUDA_DMMV_BLOCK_X, GGML_CUDA_DMMV_BLOCK_Y, 1);
283-
284-
// Use a switch statement for ncols so the compiler can unroll all loops:
285-
switch (ncols) {
286-
case 4096:
287-
dequantize_mul_mat_vec<4096, GGML_CUDA_DMMV_BLOCK_X, qk, qr, dequantize_kernel>
288-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst);
289-
break;
290-
case 5120:
291-
dequantize_mul_mat_vec<5120, GGML_CUDA_DMMV_BLOCK_X, qk, qr, dequantize_kernel>
292-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst);
293-
break;
294-
case 6656:
295-
dequantize_mul_mat_vec<6656, GGML_CUDA_DMMV_BLOCK_X, qk, qr, dequantize_kernel>
296-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst);
297-
break;
298-
case 8192:
299-
dequantize_mul_mat_vec<8192, GGML_CUDA_DMMV_BLOCK_X, qk, qr, dequantize_kernel>
300-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst);
301-
break;
302-
case 11008:
303-
dequantize_mul_mat_vec<11008, GGML_CUDA_DMMV_BLOCK_X, qk, qr, dequantize_kernel>
304-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst);
305-
break;
306-
case 13824:
307-
dequantize_mul_mat_vec<13824, GGML_CUDA_DMMV_BLOCK_X, qk, qr, dequantize_kernel>
308-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst);
309-
break;
310-
case 17920:
311-
dequantize_mul_mat_vec<17920, GGML_CUDA_DMMV_BLOCK_X, qk, qr, dequantize_kernel>
312-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst);
313-
break;
314-
case 22016:
315-
dequantize_mul_mat_vec<22016, GGML_CUDA_DMMV_BLOCK_X, qk, qr, dequantize_kernel>
316-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst);
317-
break;
318-
default:
319-
fprintf(stderr, "Tell the devs to add a switch case for this: ncols=%d\n", ncols);
320-
GGML_ASSERT(false);
321-
break;
322-
}
323-
}
324-
325-
static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
326-
dequantize_mul_mat_vec_cuda<dequantize_q4_0, QK4_0, QR4_0>(vx, y, dst, ncols, nrows, stream);
294+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
295+
dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>
296+
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
327297
}
328298

329299
static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
330-
dequantize_mul_mat_vec_cuda<dequantize_q4_1, QK4_1, QR4_1>(vx, y, dst, ncols, nrows, stream);
300+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
301+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
302+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
303+
dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>
304+
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
331305
}
332306

333307
static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
334-
dequantize_mul_mat_vec_cuda<dequantize_q5_0, QK5_0, QR5_0>(vx, y, dst, ncols, nrows, stream);
308+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
309+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
310+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
311+
dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>
312+
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
335313
}
336314

337315
static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
338-
dequantize_mul_mat_vec_cuda<dequantize_q5_1, QK5_1, QR5_1>(vx, y, dst, ncols, nrows, stream);
316+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
317+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
318+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
319+
dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>
320+
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
339321
}
340322

341323
static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
342-
dequantize_mul_mat_vec_cuda<dequantize_q8_0, QK8_0, QR8_0>(vx, y, dst, ncols, nrows, stream);
324+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
325+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
326+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
327+
dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>
328+
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
343329
}
344330

345331
static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
@@ -348,7 +334,11 @@ static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, c
348334
}
349335

350336
static void convert_mul_mat_vec_f16_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
351-
dequantize_mul_mat_vec_cuda<convert_f16, 1, 1>(vx, y, dst, ncols, nrows, stream);
337+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
338+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
339+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
340+
dequantize_mul_mat_vec<1, 1, convert_f16>
341+
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
352342
}
353343

354344
static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {

0 commit comments

Comments
 (0)