Skip to content

Commit 3698cd0

Browse files
Renamed DMMV X/Y compilation options
1 parent 98bfee0 commit 3698cd0

File tree

3 files changed

+65
-65
lines changed

3 files changed

+65
-65
lines changed

CMakeLists.txt

Lines changed: 29 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -37,44 +37,44 @@ endif()
3737
#
3838

3939
# general
40-
option(LLAMA_STATIC "llama: static link libraries" OFF)
41-
option(LLAMA_NATIVE "llama: enable -march=native flag" OFF)
42-
option(LLAMA_LTO "llama: enable link time optimization" OFF)
40+
option(LLAMA_STATIC "llama: static link libraries" OFF)
41+
option(LLAMA_NATIVE "llama: enable -march=native flag" OFF)
42+
option(LLAMA_LTO "llama: enable link time optimization" OFF)
4343

4444
# debug
45-
option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON)
46-
option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)
47-
option(LLAMA_GPROF "llama: enable gprof" OFF)
45+
option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON)
46+
option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)
47+
option(LLAMA_GPROF "llama: enable gprof" OFF)
4848

4949
# sanitizers
50-
option(LLAMA_SANITIZE_THREAD "llama: enable thread sanitizer" OFF)
51-
option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF)
52-
option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF)
50+
option(LLAMA_SANITIZE_THREAD "llama: enable thread sanitizer" OFF)
51+
option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF)
52+
option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF)
5353

5454
# instruction set specific
55-
option(LLAMA_AVX "llama: enable AVX" ON)
56-
option(LLAMA_AVX2 "llama: enable AVX2" ON)
57-
option(LLAMA_AVX512 "llama: enable AVX512" OFF)
58-
option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF)
59-
option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF)
60-
option(LLAMA_FMA "llama: enable FMA" ON)
55+
option(LLAMA_AVX "llama: enable AVX" ON)
56+
option(LLAMA_AVX2 "llama: enable AVX2" ON)
57+
option(LLAMA_AVX512 "llama: enable AVX512" OFF)
58+
option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF)
59+
option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF)
60+
option(LLAMA_FMA "llama: enable FMA" ON)
6161
# in MSVC F16C is implied with AVX2/AVX512
6262
if (NOT MSVC)
63-
option(LLAMA_F16C "llama: enable F16C" ON)
63+
option(LLAMA_F16C "llama: enable F16C" ON)
6464
endif()
6565

6666
# 3rd party libs
67-
option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
68-
option(LLAMA_BLAS "llama: use BLAS" OFF)
69-
option(LLAMA_BLAS_VENDOR "llama: BLA_VENDOR from https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" Generic)
70-
option(LLAMA_CUBLAS "llama: use cuBLAS" OFF)
71-
set(LLAMA_CUDA_BX "32" CACHE STRING "llama: x block size for dmmv CUDA kernels")
72-
set(LLAMA_CUDA_BY "1" CACHE STRING "llama: y block size for dmmv CUDA kernels")
73-
option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
74-
75-
option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
76-
option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
77-
option(LLAMA_BUILD_SERVER "llama: build server example" OFF)
67+
option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
68+
option(LLAMA_BLAS "llama: use BLAS" OFF)
69+
option(LLAMA_BLAS_VENDOR "llama: BLA_VENDOR from https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" Generic)
70+
option(LLAMA_CUBLAS "llama: use cuBLAS" OFF)
71+
set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
72+
set(LLAMA_CUDA_DMMV_Y "1" CACHE STRING "llama: y block size for dmmv CUDA kernels")
73+
option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
74+
75+
option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
76+
option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
77+
option(LLAMA_BUILD_SERVER "llama: build server example" OFF)
7878

7979
#
8080
# Build info header
@@ -186,8 +186,8 @@ if (LLAMA_CUBLAS)
186186
set(GGML_CUDA_SOURCES ggml-cuda.cu ggml-cuda.h)
187187

188188
add_compile_definitions(GGML_USE_CUBLAS)
189-
add_compile_definitions(GGML_CUDA_DMMV_BLOCK_X=${LLAMA_CUDA_BX})
190-
add_compile_definitions(GGML_CUDA_DMMV_BLOCK_Y=${LLAMA_CUDA_BY})
189+
add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
190+
add_compile_definitions(GGML_CUDA_DMMV_Y=${LLAMA_CUDA_DMMV_Y})
191191

192192
if (LLAMA_STATIC)
193193
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)

Makefile

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -133,16 +133,16 @@ ifdef LLAMA_CUBLAS
133133
OBJS += ggml-cuda.o
134134
NVCC = nvcc
135135
NVCCFLAGS = --forward-unknown-to-host-compiler -arch=native
136-
ifdef LLAMA_CUDA_BX
137-
NVCCFLAGS += -DGGML_CUDA_DMMV_BLOCK_X=$(LLAMA_CUDA_BX)
136+
ifdef LLAMA_CUDA_DMMV_X
137+
NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
138138
else
139-
NVCCFLAGS += -DGGML_CUDA_DMMV_BLOCK_X=32
140-
endif # LLAMA_CUDA_BY
139+
NVCCFLAGS += -DGGML_CUDA_DMMV_X=32
140+
endif # LLAMA_CUDA_DMMV_X
141141
ifdef LLAMA_CUDA_DMMV_Y
142-
NVCCFLAGS += -DGGML_CUDA_DMMV_BLOCK_Y=$(LLAMA_CUDA_BY)
142+
NVCCFLAGS += -DGGML_CUDA_DMMV_Y=$(LLAMA_CUDA_DMMV_Y)
143143
else
144-
NVCCFLAGS += -DGGML_CUDA_DMMV_BLOCK_Y=1
145-
endif # LLAMA_CUDA_BY
144+
NVCCFLAGS += -DGGML_CUDA_DMMV_Y=1
145+
endif # LLAMA_CUDA_DMMV_Y
146146
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
147147
$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
148148
endif # LLAMA_CUBLAS

ggml-cuda.cu

Lines changed: 29 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -90,11 +90,11 @@ static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 blo
9090
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
9191

9292
// dmmv = dequantize_mul_mat_vec
93-
#ifndef GGML_CUDA_DMMV_BLOCK_X
94-
#define GGML_CUDA_DMMV_BLOCK_X 32 // can by set by compiler option LLAMA_CUDA_BY
93+
#ifndef GGML_CUDA_DMMV_X
94+
#define GGML_CUDA_DMMV_X 32 // can be set by compiler option LLAMA_CUDA_DMMV_X
9595
#endif
96-
#ifndef GGML_CUDA_DMMV_BLOCK_Y
97-
#define GGML_CUDA_DMMV_BLOCK_Y 1 // can by set by compiler option LLAMA_CUDA_BY
96+
#ifndef GGML_CUDA_DMMV_Y
97+
#define GGML_CUDA_DMMV_Y 1 // can be set by compiler option LLAMA_CUDA_DMMV_Y
9898
#endif
9999

100100
static __global__ void mul_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
@@ -217,7 +217,7 @@ static __global__ void dequantize_mul_mat_vec(const void * vx, const float * y,
217217
const int row = blockIdx.x*blockDim.y + threadIdx.y;
218218
const int tid = threadIdx.x;
219219

220-
const int iter_stride = 2*GGML_CUDA_DMMV_BLOCK_X;
220+
const int iter_stride = 2*GGML_CUDA_DMMV_X;
221221
const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
222222
const int y_offset = qr == 1 ? 1 : qk/2;
223223

@@ -289,43 +289,43 @@ static void dequantize_row_q8_0_cuda(const void * vx, float * y, const int k, cu
289289
}
290290

291291
static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
292-
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
293-
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
294-
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
292+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
293+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
294+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
295295
dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>
296-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
296+
<<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
297297
}
298298

299299
static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
300-
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
301-
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
302-
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
300+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
301+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
302+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
303303
dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>
304-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
304+
<<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
305305
}
306306

307307
static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
308-
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
309-
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
310-
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
308+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
309+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
310+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
311311
dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>
312-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
312+
<<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
313313
}
314314

315315
static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
316-
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
317-
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
318-
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
316+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
317+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
318+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
319319
dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>
320-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
320+
<<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
321321
}
322322

323323
static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
324-
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
325-
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
326-
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
324+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
325+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
326+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
327327
dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>
328-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
328+
<<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
329329
}
330330

331331
static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
@@ -334,11 +334,11 @@ static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, c
334334
}
335335

336336
static void convert_mul_mat_vec_f16_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
337-
GGML_ASSERT(ncols % GGML_CUDA_DMMV_BLOCK_X == 0);
338-
GGML_ASSERT(nrows % GGML_CUDA_DMMV_BLOCK_Y == 0);
339-
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_BLOCK_Y, 1);
337+
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
338+
GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
339+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
340340
dequantize_mul_mat_vec<1, 1, convert_f16>
341-
<<<nrows/GGML_CUDA_DMMV_BLOCK_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
341+
<<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
342342
}
343343

344344
static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {

0 commit comments

Comments (0)