Fix #777, #464 #778

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into base: main
1 change: 1 addition & 0 deletions .gitignore
@@ -1,3 +1,4 @@
+data/
.python-version

.vscode/
3 changes: 0 additions & 3 deletions .gitmodules

This file was deleted.

46 changes: 11 additions & 35 deletions CMakeLists.txt
@@ -5,40 +5,16 @@ project(llama_cpp)
option(LLAMA_BUILD "Build llama.cpp shared library and install alongside python package" ON)

if (LLAMA_BUILD)
-    set(BUILD_SHARED_LIBS "On")
-    if (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
-        # Need to disable these llama.cpp flags on Apple x86_64,
-        # otherwise users may encounter invalid instruction errors
-        set(LLAMA_AVX "Off" CACHE BOOL "llama: enable AVX" FORCE)
-        set(LLAMA_AVX2 "Off" CACHE BOOL "llama: enable AVX2" FORCE)
-        set(LLAMA_FMA "Off" CACHE BOOL "llama: enable FMA" FORCE)
-        set(LLAMA_F16C "Off" CACHE BOOL "llama: enable F16C" FORCE)
+    include(ExternalProject)
+    if(NOT DEFINED SKBUILD_PLATLIB_DIR)
+        set(SKBUILD_PLATLIB_DIR ${CMAKE_SOURCE_DIR})
    endif()
-    add_subdirectory(vendor/llama.cpp)
-    install(
-        TARGETS llama
-        LIBRARY DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-        RUNTIME DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-        ARCHIVE DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-        FRAMEWORK DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-        RESOURCE DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-    )
-    # Temporary fix for https://github.com/scikit-build/scikit-build-core/issues/374
-    install(
-        TARGETS llama
-        LIBRARY DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-        RUNTIME DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-        ARCHIVE DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-        FRAMEWORK DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-        RESOURCE DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-    )
-    # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
-    install(
-        FILES $<TARGET_RUNTIME_DLLS:llama>
-        DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-    )
-    install(
-        FILES $<TARGET_RUNTIME_DLLS:llama>
-        DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-    )
+    ExternalProject_Add(
+        llama-cpp
+        GIT_REPOSITORY "https://github.com/ggerganov/llama.cpp"
+        GIT_TAG f5ef5cf
+        CMAKE_ARGS -DBUILD_SHARED_LIBS=ON "-DCMAKE_INSTALL_PREFIX=${SKBUILD_PLATLIB_DIR}/llama_cpp/data"
+        USES_TERMINAL_DOWNLOAD 1
+        USES_TERMINAL_CONFIGURE 1
+        USES_TERMINAL_BUILD 1)
endif()
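
Note on the new build flow: instead of building the vendored submodule with add_subdirectory, the ExternalProject_Add step above clones the pinned llama.cpp tag at build time and installs it under ${SKBUILD_PLATLIB_DIR}/llama_cpp/data, which is also why data/ is now in .gitignore and why the loader change below looks in data/lib. Below is a minimal sketch (not part of the diff) for checking that a local build put the shared library where the loader will look; it assumes SKBUILD_PLATLIB_DIR fell back to the source tree, and the library file names are platform assumptions.

```python
# Sketch: verify the ExternalProject install layout after a local build.
# Paths and file names are assumptions based on CMAKE_INSTALL_PREFIX above
# and the llama_cpp.py change below; adjust for your platform.
import pathlib

# With SKBUILD_PLATLIB_DIR defaulting to the source dir, the install prefix
# becomes <repo>/llama_cpp/data, so shared libraries land in data/lib.
lib_dir = pathlib.Path("llama_cpp") / "data" / "lib"

candidates = ["libllama.so", "libllama.dylib", "llama.dll"]
found = [p for p in (lib_dir / name for name in candidates) if p.exists()]
print(f"shared libraries found under {lib_dir}: {found or 'none'}")
```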
2 changes: 1 addition & 1 deletion llama_cpp/llama_cpp.py
@@ -25,7 +25,7 @@
# Load the library
def _load_shared_library(lib_base_name: str):
    # Construct the paths to the possible shared library names
-    _base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
+    _base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "data" / "lib"
    # Searching for the library in the current directory under the name "libllama" (default name
    # for llamacpp) and "llama" (default name for this repo)
    _lib_paths: List[pathlib.Path] = []
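
With _base_path now pointing at <package dir>/data/lib, the loader resolves the shared library from the ExternalProject install location rather than from the package directory itself. A minimal sketch of that lookup follows; the candidate names "libllama"/"llama" come from the comment in the diff above, and the platform handling and error reporting are simplified assumptions, not the PR's exact code.

```python
# Sketch of the post-change library search (simplified, not the PR's code).
import ctypes
import os
import pathlib
import sys
from typing import List

def _load_shared_library(lib_base_name: str):
    # New base path: <package dir>/data/lib, matching the change above.
    _base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "data" / "lib"
    _lib_paths: List[pathlib.Path] = []
    if sys.platform.startswith("linux"):
        _lib_paths += [_base_path / f"lib{lib_base_name}.so"]
    elif sys.platform == "darwin":
        _lib_paths += [
            _base_path / f"lib{lib_base_name}.so",
            _base_path / f"lib{lib_base_name}.dylib",
        ]
    elif sys.platform == "win32":
        _lib_paths += [_base_path / f"{lib_base_name}.dll"]
    for _lib_path in _lib_paths:
        if _lib_path.exists():
            return ctypes.CDLL(str(_lib_path))  # first existing candidate wins
    raise FileNotFoundError(f"Shared library '{lib_base_name}' not found in {_base_path}")
```

Called as _load_shared_library("llama"), this would pick up the libllama library that the ExternalProject step installs into llama_cpp/data/lib.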
1 change: 0 additions & 1 deletion vendor/llama.cpp
Submodule llama.cpp deleted from f5ef5c