
Commit ab96610

cmake : enable warnings in llama (#10474)
* cmake : enable warnings in llama (ggml-ci)
* cmake : add llama_get_flags and respect LLAMA_FATAL_WARNINGS
* cmake : get_flags -> ggml_get_flags
* speculative-simple : fix warnings
* cmake : reuse ggml_get_flags (ggml-ci)
* speculative-simple : fix compile warning (ggml-ci)
1 parent 7db3846 commit ab96610
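The new per-directory warning flags are gated by the LLAMA_ALL_WARNINGS and LLAMA_FATAL_WARNINGS cache options mentioned in the message. A minimal sketch of how those options are typically declared and enabled at configure time (the exact descriptions and defaults below are assumptions, not part of this commit):

# Sketch only: the cache options read by the new llama_add_compile_flags() helper.
# Descriptions and defaults are assumed, not taken from this diff.
option(LLAMA_ALL_WARNINGS   "llama: enable all compiler warnings"      ON)
option(LLAMA_FATAL_WARNINGS "llama: treat compiler warnings as errors" OFF)

# Typical configure step to turn warnings into errors:
#   cmake -B build -DLLAMA_FATAL_WARNINGS=ON
#   cmake --build build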

8 files changed: +49 −6 lines

CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -82,6 +82,7 @@ option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
 
 # Required for relocatable CMake package
 include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)
+include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/common.cmake)
 
 # override ggml options
 set(GGML_SANITIZE_THREAD ${LLAMA_SANITIZE_THREAD})

cmake/common.cmake

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+function(llama_add_compile_flags)
+    if (LLAMA_FATAL_WARNINGS)
+        if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+            list(APPEND C_FLAGS   -Werror)
+            list(APPEND CXX_FLAGS -Werror)
+        elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+            add_compile_options(/WX)
+        endif()
+    endif()
+
+    if (LLAMA_ALL_WARNINGS)
+        if (NOT MSVC)
+            list(APPEND C_FLAGS -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes
+                                -Werror=implicit-int -Werror=implicit-function-declaration)
+
+            list(APPEND CXX_FLAGS -Wmissing-declarations -Wmissing-noreturn)
+
+            list(APPEND WARNING_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function)
+
+            list(APPEND C_FLAGS   ${WARNING_FLAGS})
+            list(APPEND CXX_FLAGS ${WARNING_FLAGS})
+
+            ggml_get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION})
+
+            add_compile_options("$<$<COMPILE_LANGUAGE:C>:${C_FLAGS};${GF_C_FLAGS}>"
+                                "$<$<COMPILE_LANGUAGE:CXX>:${CXX_FLAGS};${GF_CXX_FLAGS}>")
+        else()
+            # todo : msvc
+            set(C_FLAGS   "" PARENT_SCOPE)
+            set(CXX_FLAGS "" PARENT_SCOPE)
+        endif()
+    endif()
+endfunction()
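For context, the helper is intended to be called once per directory before targets are defined, as the common/, examples/ and src/ changes below do. A minimal usage sketch with a hypothetical target:

# Usage sketch (hypothetical target name). cmake/common.cmake is included by the
# top-level CMakeLists.txt, so llama_add_compile_flags() is already defined here.
llama_add_compile_flags()

add_library(my_component STATIC my_component.cpp)
# Targets created after the call pick up the warning (and -Werror / /WX) flags
# through the directory-scoped add_compile_options() inside the helper.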

common/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
@@ -2,6 +2,8 @@
 
 find_package(Threads REQUIRED)
 
+llama_add_compile_flags()
+
 # Build info header
 #

examples/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
@@ -6,6 +6,10 @@ find_package(Threads REQUIRED)
 
 # ...
 
+# flags
+
+llama_add_compile_flags()
+
 # examples
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})

examples/speculative-simple/speculative-simple.cpp

Lines changed: 3 additions & 3 deletions
@@ -70,13 +70,13 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> inp;
     inp = common_tokenize(ctx_tgt, params.prompt, true, true);
 
-    if (llama_n_ctx(ctx_tgt) < (int) inp.size()) {
+    if (llama_n_ctx(ctx_tgt) < (uint32_t) inp.size()) {
         LOG_ERR("%s: the prompt exceeds the context size (%d tokens, ctx %d)\n", __func__, (int) inp.size(), llama_n_ctx(ctx_tgt));
 
         return 1;
     }
 
-    if (llama_n_batch(ctx_tgt) < (int) inp.size()) {
+    if (llama_n_batch(ctx_tgt) < (uint32_t) inp.size()) {
         LOG_ERR("%s: the prompt exceeds the batch size (%d tokens, batch %d)\n", __func__, (int) inp.size(), llama_n_batch(ctx_tgt));
 
         return 1;
@@ -155,7 +155,7 @@ int main(int argc, char ** argv) {
         // evaluate the target model on [id_last, draft0, draft1, ..., draftN-1]
         {
             // do not waste time on small drafts
-            if (draft.size() < n_draft_min) {
+            if (draft.size() < (size_t) n_draft_min) {
                 draft.clear();
             }
ggml/src/CMakeLists.txt

Lines changed: 3 additions & 2 deletions
@@ -24,7 +24,7 @@ if (NOT MSVC)
     endif()
 endif()
 
-function(get_flags CCID CCVER)
+function(ggml_get_flags CCID CCVER)
     set(C_FLAGS "")
     set(CXX_FLAGS "")
 
@@ -41,6 +41,7 @@ function(get_flags CCID CCVER)
     elseif (CCID STREQUAL "GNU")
         set(C_FLAGS   -Wdouble-promotion)
         set(CXX_FLAGS -Wno-array-bounds)
+
        if (CCVER VERSION_GREATER_EQUAL 8.1.0)
            list(APPEND CXX_FLAGS -Wextra-semi)
        endif()
@@ -69,7 +70,7 @@ if (GGML_ALL_WARNINGS)
     list(APPEND C_FLAGS   ${WARNING_FLAGS})
     list(APPEND CXX_FLAGS ${WARNING_FLAGS})
 
-    get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION})
+    ggml_get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION})
 
     add_compile_options("$<$<COMPILE_LANGUAGE:C>:${C_FLAGS};${GF_C_FLAGS}>"
                         "$<$<COMPILE_LANGUAGE:CXX>:${CXX_FLAGS};${GF_CXX_FLAGS}>")

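The rename also clarifies the return convention: the function computes per-compiler flags and hands them back to the caller, which reads them as GF_C_FLAGS and GF_CXX_FLAGS. The hunks above do not show the full function body, so the following is only a sketch of the PARENT_SCOPE pattern such a helper presumably uses (flag values are placeholders):

# Sketch of the return convention, not the actual ggml_get_flags body.
function(ggml_get_flags_sketch CCID CCVER)
    set(C_FLAGS   "")
    set(CXX_FLAGS "")
    if (CCID STREQUAL "GNU")
        set(C_FLAGS   -Wdouble-promotion)    # placeholder per-compiler flags
        set(CXX_FLAGS -Wno-array-bounds)
    endif()
    set(GF_C_FLAGS   ${C_FLAGS}   PARENT_SCOPE)  # callers read these as GF_C_FLAGS
    set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE)  # ...and GF_CXX_FLAGS
endfunction()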
ggml/src/ggml-cuda/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -132,7 +132,7 @@ if (CUDAToolkit_FOUND)
 
         message("-- CUDA host compiler is ${CUDA_CCID} ${CUDA_CCVER}")
 
-        get_flags(${CUDA_CCID} ${CUDA_CCVER})
+        ggml_get_flags(${CUDA_CCID} ${CUDA_CCVER})
         list(APPEND CUDA_CXX_FLAGS ${CXX_FLAGS} ${GF_CXX_FLAGS})  # This is passed to -Xcompiler later
     endif()
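The trailing comment refers to a later step that is not part of this hunk. As an assumption-labelled sketch, forwarding the collected host-compiler warning flags to nvcc usually looks roughly like this (variable names follow the diff; the exact mechanism is not shown there):

# Sketch only (not shown in this diff): hand host C++ flags to nvcc, which
# forwards them to the host compiler via -Xcompiler.
if (NOT CUDA_CXX_FLAGS STREQUAL "")
    list(JOIN CUDA_CXX_FLAGS " " CUDA_CXX_FLAGS_JOINED)
    list(APPEND CUDA_FLAGS -Xcompiler "${CUDA_CXX_FLAGS_JOINED}")
endif()
add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:${CUDA_FLAGS}>")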

src/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
@@ -5,6 +5,8 @@ if (WIN32)
     endif()
 endif()
 
+llama_add_compile_flags()
+
 #
 # libraries
 #
