Skip to content

Commit 990a5e2

Browse files
authored
cmake : add relocatable Llama package (#2960)
* Keep static libs and headers with install
* Add logic to generate Config package
* Use proper build info
* Add llama as import library
* Prefix target with package name
* Add example project using CMake package
* Update README
* Update README
* Remove trailing whitespace
1 parent 980ab41 commit 990a5e2

File tree

5 files changed

+247
-12
lines changed

5 files changed

+247
-12
lines changed

CMakeLists.txt

Lines changed: 54 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -172,8 +172,8 @@ if (LLAMA_METAL)
172172
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
173173

174174
message(STATUS "Metal framework found")
175-
176-
set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)
175+
set(GGML_HEADERS_METAL ggml-metal.h)
176+
set(GGML_SOURCES_METAL ggml-metal.m)
177177

178178
add_compile_definitions(GGML_USE_METAL)
179179
if (LLAMA_METAL_NDEBUG)
@@ -192,7 +192,6 @@ if (LLAMA_METAL)
192192
${METALKIT_FRAMEWORK}
193193
)
194194
endif()
195-
196195
if (LLAMA_BLAS)
197196
if (LLAMA_STATIC)
198197
set(BLA_STATIC ON)
@@ -269,7 +268,8 @@ if (LLAMA_BLAS)
269268
endif()
270269

271270
if (LLAMA_K_QUANTS)
272-
set(GGML_SOURCES_EXTRA ${GGML_SOURCES_EXTRA} k_quants.c k_quants.h)
271+
set(GGML_HEADERS_EXTRA k_quants.h)
272+
set(GGML_SOURCES_EXTRA k_quants.c)
273273
add_compile_definitions(GGML_USE_K_QUANTS)
274274
if (LLAMA_QKK_64)
275275
add_compile_definitions(GGML_QKK_64)
@@ -285,7 +285,8 @@ if (LLAMA_CUBLAS)
285285

286286
enable_language(CUDA)
287287

288-
set(GGML_SOURCES_CUDA ggml-cuda.cu ggml-cuda.h)
288+
set(GGML_HEADERS_CUDA ggml-cuda.h)
289+
set(GGML_SOURCES_CUDA ggml-cuda.cu)
289290

290291
add_compile_definitions(GGML_USE_CUBLAS)
291292
# if (LLAMA_CUDA_CUBLAS)
@@ -333,6 +334,7 @@ if (LLAMA_MPI)
333334
find_package(MPI)
334335
if (MPI_C_FOUND)
335336
message(STATUS "MPI found")
337+
set(GGML_HEADERS_MPI ggml-mpi.h)
336338
set(GGML_SOURCES_MPI ggml-mpi.c ggml-mpi.h)
337339
add_compile_definitions(GGML_USE_MPI)
338340
add_compile_definitions(${MPI_C_COMPILE_DEFINITIONS})
@@ -355,7 +357,8 @@ if (LLAMA_CLBLAST)
355357
if (CLBlast_FOUND)
356358
message(STATUS "CLBlast found")
357359

358-
set(GGML_SOURCES_OPENCL ggml-opencl.cpp ggml-opencl.h)
360+
set(GGML_HEADERS_OPENCL ggml-opencl.h)
361+
set(GGML_SOURCES_OPENCL ggml-opencl.cpp)
359362

360363
add_compile_definitions(GGML_USE_CLBLAST)
361364

@@ -631,11 +634,11 @@ add_library(ggml OBJECT
631634
ggml.h
632635
ggml-alloc.c
633636
ggml-alloc.h
634-
${GGML_SOURCES_CUDA}
635-
${GGML_SOURCES_OPENCL}
636-
${GGML_SOURCES_METAL}
637-
${GGML_SOURCES_MPI}
638-
${GGML_SOURCES_EXTRA}
637+
${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA}
638+
${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL}
639+
${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL}
640+
${GGML_SOURCES_MPI} ${GGML_HEADERS_MPI}
641+
${GGML_SOURCES_EXTRA} ${GGML_HEADERS_EXTRA}
639642
)
640643

641644
target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
@@ -673,14 +676,53 @@ if (BUILD_SHARED_LIBS)
673676
if (LLAMA_METAL)
674677
set_target_properties(llama PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
675678
endif()
676-
install(TARGETS llama LIBRARY)
677679
endif()
678680

681+
679682
#
680683
# install
681684
#
682685

683686
include(GNUInstallDirs)
687+
include(CMakePackageConfigHelpers)
688+
689+
set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR}
690+
CACHE PATH "Location of header files")
691+
set(LLAMA_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR}
692+
CACHE PATH "Location of library files")
693+
set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR}
694+
CACHE PATH "Location of binary files")
695+
set(LLAMA_BUILD_NUMBER ${BUILD_NUMBER})
696+
set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT})
697+
set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER})
698+
699+
configure_package_config_file(
700+
${CMAKE_CURRENT_SOURCE_DIR}/scripts/LlamaConfig.cmake.in
701+
${CMAKE_CURRENT_BINARY_DIR}/LlamaConfig.cmake
702+
INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/Llama
703+
PATH_VARS LLAMA_INCLUDE_INSTALL_DIR
704+
LLAMA_LIB_INSTALL_DIR
705+
LLAMA_BIN_INSTALL_DIR )
706+
707+
write_basic_package_version_file(
708+
${CMAKE_CURRENT_BINARY_DIR}/LlamaConfigVersion.cmake
709+
VERSION ${LLAMA_INSTALL_VERSION}
710+
COMPATIBILITY SameMajorVersion)
711+
712+
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/LlamaConfig.cmake
713+
${CMAKE_CURRENT_BINARY_DIR}/LlamaConfigVersion.cmake
714+
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/Llama)
715+
716+
set(GGML_PUBLIC_HEADERS "ggml.h"
717+
"${GGML_HEADERS_CUDA}" "${GGML_HEADERS_OPENCL}"
718+
"${GGML_HEADERS_METAL}" "${GGML_HEADERS_MPI}" "${GGML_HEADERS_EXTRA}")
719+
720+
set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
721+
install(TARGETS ggml PUBLIC_HEADER)
722+
723+
set_target_properties(llama PROPERTIES PUBLIC_HEADER llama.h)
724+
install(TARGETS llama LIBRARY PUBLIC_HEADER)
725+
684726
install(
685727
FILES convert.py
686728
PERMISSIONS

examples/main-cmake-pkg/.gitignore

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
# Prerequisites
2+
*.d
3+
4+
# Compiled Object files
5+
*.slo
6+
*.lo
7+
*.o
8+
*.obj
9+
10+
# Precompiled Headers
11+
*.gch
12+
*.pch
13+
14+
# Compiled Dynamic libraries
15+
*.so
16+
*.dylib
17+
*.dll
18+
19+
# Fortran module files
20+
*.mod
21+
*.smod
22+
23+
# Compiled Static libraries
24+
*.lai
25+
*.la
26+
*.a
27+
*.lib
28+
29+
# Executables
30+
*.exe
31+
*.out
32+
*.app
33+
34+
*.gguf
35+
36+
*.log
37+
.DS_Store
38+
.build/
39+
.cache/
40+
.direnv/
41+
.envrc
42+
.swiftpm
43+
.venv
44+
.clang-tidy
45+
.vs/
46+
.vscode/
47+
48+
build*/
49+
out/
50+
tmp/
51+
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
cmake_minimum_required(VERSION 3.12)
2+
project("main-cmake-pkg" C CXX)
3+
set(TARGET main-cmake-pkg)
4+
5+
find_package(Llama 0.0.1 REQUIRED)
6+
7+
# Bake common functionality in with target. Because applications
8+
# using the relocatable Llama package should be outside of the
9+
# source tree, main-cmake-pkg pretends the dependencies are built-in.
10+
11+
set(_common_path "${CMAKE_CURRENT_LIST_DIR}/../../common")
12+
add_library(common OBJECT
13+
${_common_path}/common.h
14+
${_common_path}/common.cpp
15+
${_common_path}/console.h
16+
${_common_path}/console.cpp
17+
${_common_path}/grammar-parser.h
18+
${_common_path}/grammar-parser.cpp
19+
)
20+
21+
# WARNING: because build-info.h is auto-generated, it will only
22+
# be available after the user has built the llama.cpp sources.
23+
#
24+
configure_file(${_common_path}/../build-info.h
25+
${CMAKE_CURRENT_BINARY_DIR}/build-info.h
26+
COPYONLY)
27+
28+
target_include_directories(common PUBLIC ${LLAMA_INCLUDE_DIR}
29+
${CMAKE_CURRENT_BINARY_DIR})
30+
31+
add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../main/main.cpp)
32+
target_include_directories(${TARGET} PRIVATE ${_common_path})
33+
install(TARGETS ${TARGET} RUNTIME)
34+
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
35+
target_compile_features(${TARGET} PRIVATE cxx_std_11)
36+

examples/main-cmake-pkg/README.md

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
# llama.cpp/examples/main-cmake-pkg
2+
3+
This program builds the [main](../main) application using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
4+
5+
## Building
6+
7+
Because this example is "outside of the source tree", it is important to first build/install llama.cpp using CMake. An example is provided here, but please see the [llama.cpp build instructions](../..) for more detailed build instructions.
8+
9+
### Considerations
10+
11+
When hardware acceleration libraries are used (e.g. cuBLAS, Metal, CLBlast, etc.), CMake must be able to locate the associated CMake package. In the example below, when building _main-cmake-pkg_ notice the `CMAKE_PREFIX_PATH` includes the Llama CMake package location _in addition to_ the CLBlast package—which was used when compiling _llama.cpp_.
12+
13+
### Build llama.cpp and install to C:\LlamaCPP directory
14+
15+
In this case, CLBlast was already installed so the CMake package is referenced in `CMAKE_PREFIX_PATH`.
16+
17+
```cmd
18+
git clone https://github.com/ggerganov/llama.cpp
19+
cd llama.cpp
20+
mkdir build
21+
cd build
22+
cmake .. -DBUILD_SHARED_LIBS=OFF -DLLAMA_CLBLAST=ON -DCMAKE_PREFIX_PATH=C:/CLBlast/lib/cmake/CLBlast -G "Visual Studio 17 2022" -A x64
23+
cmake --build . --config Release
24+
cmake --install . --prefix C:/LlamaCPP
25+
```
26+
27+
### Build main-cmake-pkg
28+
29+
30+
```cmd
31+
cd ..\examples\main-cmake-pkg
32+
mkdir build
33+
cd build
34+
cmake .. -DBUILD_SHARED_LIBS=OFF -DCMAKE_PREFIX_PATH="C:/CLBlast/lib/cmake/CLBlast;C:/LlamaCPP/lib/cmake/Llama" -G "Visual Studio 17 2022" -A x64
35+
cmake --build . --config Release
36+
cmake --install . --prefix C:/MyLlamaApp
37+
```

scripts/LlamaConfig.cmake.in

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
set(LLAMA_VERSION @LLAMA_INSTALL_VERSION@)
2+
set(LLAMA_BUILD_COMMIT @LLAMA_BUILD_COMMIT@)
3+
set(LLAMA_BUILD_NUMBER @LLAMA_BUILD_NUMBER@)
4+
set(LLAMA_SHARED_LIB @BUILD_SHARED_LIBS@)
5+
set(LLAMA_BLAS @LLAMA_BLAS@)
6+
set(LLAMA_CUBLAS @LLAMA_CUBLAS@)
7+
set(LLAMA_METAL @LLAMA_METAL@)
8+
set(LLAMA_MPI @LLAMA_MPI@)
9+
set(LLAMA_CLBLAST @LLAMA_CLBLAST@)
10+
set(LLAMA_HIPBLAS @LLAMA_HIPBLAS@)
11+
set(LLAMA_ACCELERATE @LLAMA_ACCELERATE@)
12+
13+
@PACKAGE_INIT@
14+
15+
set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@")
16+
set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@")
17+
set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@")
18+
19+
# Ensure transitive dependencies are satisfied
20+
21+
find_package(Threads REQUIRED)
22+
if (APPLE AND LLAMA_ACCELERATE)
23+
find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED)
24+
endif()
25+
26+
if (LLAMA_BLAS)
27+
find_package(BLAS REQUIRED)
28+
endif()
29+
30+
if (LLAMA_CUBLAS)
31+
find_package(CUDAToolkit REQUIRED)
32+
endif()
33+
34+
if (LLAMA_METAL)
35+
find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
36+
find_library(METAL_FRAMEWORK Metal REQUIRED)
37+
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
38+
endif()
39+
40+
if (LLAMA_MPI)
41+
find_package(MPI REQUIRED)
42+
endif()
43+
44+
if (LLAMA_CLBLAST)
45+
find_package(CLBlast REQUIRED)
46+
endif()
47+
48+
if (LLAMA_HIPBLAS)
49+
find_package(hip REQUIRED)
50+
find_package(hipblas REQUIRED)
51+
find_package(rocblas REQUIRED)
52+
endif()
53+
54+
find_library(llama_LIBRARY llama
55+
REQUIRED
56+
HINTS ${LLAMA_LIB_DIR})
57+
58+
set(_llama_link_deps "Threads::Threads" "@LLAMA_EXTRA_LIBS@")
59+
add_library(llama UNKNOWN IMPORTED)
60+
set_target_properties(llama
61+
PROPERTIES
62+
INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}"
63+
INTERFACE_LINK_LIBRARIES "${_llama_link_deps}"
64+
IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
65+
IMPORTED_LOCATION "${llama_LIBRARY}"
66+
INTERFACE_COMPILE_FEATURES cxx_std_11
67+
POSITION_INDEPENDENT_CODE ON )
68+
69+
check_required_components(Llama)

0 commit comments

Comments
 (0)