
Commit ce0d1a6

Merge pull request ggml-org#24 from OpenBMB/master ("sync master")
2 parents: ea0c828 + fc1c860

661 files changed: +55731 additions, -167980 deletions


.devops/full-cuda.Dockerfile

Lines changed: 2 additions & 2 deletions

@@ -6,7 +6,7 @@ ARG CUDA_VERSION=11.7.1
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
 
-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build
 
 # Unless otherwise specified, we make a fat build.
 ARG CUDA_DOCKER_ARCH=all
@@ -27,7 +27,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1
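
Note: the recurring change across these Dockerfiles is the build-flag rename from the LLAMA_ prefix to the GGML_ prefix (LLAMA_CUDA → GGML_CUDA, LLAMA_HIPBLAS → GGML_HIPBLAS, LLAMA_SYCL → GGML_SYCL, LLAMA_VULKAN → GGML_VULKAN), matching the renamed ggml build options in the synced tree. A minimal sketch of the same rename applied to a local build; the CMake spelling -DGGML_CUDA=ON is an assumption inferred from the SYCL/Vulkan flags in the diffs below, not something this commit shows:

    # Makefile build, as used by the updated Dockerfiles and spec file:
    make -j$(nproc) GGML_CUDA=1 llama-cli
    # Assumed CMake equivalent after the rename:
    cmake -B build -DGGML_CUDA=ON && cmake --build build --config Release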

.devops/full-rocm.Dockerfile

Lines changed: 2 additions & 2 deletions

@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete
 
-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build
 
 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
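
The ROCm diffs only rename the flag; how containers reach the GPU is unchanged. A hedged usage sketch (the image tag is a placeholder; /dev/kfd and /dev/dri are the usual ROCm device nodes passed through to the container):

    docker build -t llamacpp-full-rocm -f .devops/full-rocm.Dockerfile .
    docker run -it --device /dev/kfd --device /dev/dri -v ./models:/models llamacpp-full-rocm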

.devops/full.Dockerfile

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04
 
-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build
 
 RUN apt-get update && \
     apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1

.devops/llama-cli-cuda.Dockerfile

Lines changed: 3 additions & 3 deletions

@@ -6,7 +6,7 @@ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VER
 # Target the CUDA runtime image
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
 
-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build
 
 # Unless otherwise specified, we make a fat build.
 ARG CUDA_DOCKER_ARCH=all
@@ -21,11 +21,11 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 
 RUN make -j$(nproc) llama-cli
 
-FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
+FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
 
 RUN apt-get update && \
     apt-get install -y libgomp1

.devops/llama-cli-intel.Dockerfile

Lines changed: 9 additions & 7 deletions

@@ -1,23 +1,25 @@
 ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
 
-FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build
 
-ARG LLAMA_SYCL_F16=OFF
+ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
     apt-get install -y git
 
 WORKDIR /app
 
 COPY . .
 
-RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
-        echo "LLAMA_SYCL_F16 is set" && \
-        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+        echo "GGML_SYCL_F16 is set" && \
+        export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
-    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+    echo "Building with static libs" && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \
+        ${OPT_SYCL_F16} -DBUILD_SHARED_LIBS=OFF && \
     cmake --build build --config Release --target llama-cli
 
-FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime
 
 COPY --from=build /app/build/bin/llama-cli /llama-cli
 
.devops/llama-cli-rocm.Dockerfile

Lines changed: 2 additions & 2 deletions

@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete
 
-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build
 
 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++

.devops/llama-cli-vulkan.Dockerfile

Lines changed: 2 additions & 2 deletions

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=jammy
 
-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build
 
 # Install build tools
 RUN apt update && apt install -y git build-essential cmake wget libgomp1
@@ -14,7 +14,7 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 # Build it
 WORKDIR /app
 COPY . .
-RUN cmake -B build -DLLAMA_VULKAN=1 && \
+RUN cmake -B build -DGGML_VULKAN=1 && \
     cmake --build build --config Release --target llama-cli
 
 # Clean up

.devops/llama-cli.Dockerfile

Lines changed: 2 additions & 2 deletions

@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04
 
-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build
 
 RUN apt-get update && \
     apt-get install -y build-essential git
@@ -11,7 +11,7 @@ COPY . .
 
 RUN make -j$(nproc) llama-cli
 
-FROM ubuntu:$UBUNTU_VERSION as runtime
+FROM ubuntu:$UBUNTU_VERSION AS runtime
 
 RUN apt-get update && \
     apt-get install -y libgomp1
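
All of the CLI images share this two-stage shape: a build stage compiles the target, and a slim runtime stage copies in only the binary plus libgomp1 for OpenMP. A hedged usage sketch for this CPU-only image (tag and model path are illustrative, and it assumes the image's entrypoint is the llama-cli binary, which sits outside the lines shown in this diff):

    docker build -t llamacpp-cli -f .devops/llama-cli.Dockerfile .
    docker run -v ./models:/models llamacpp-cli -m /models/model.gguf -p "Hello"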

.devops/llama-cpp-clblast.srpm.spec

Lines changed: 0 additions & 84 deletions
This file was deleted.

.devops/llama-cpp-cuda.srpm.spec

Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@ CPU inference for Meta's Lllama2 models using default options.
 %setup -n llama.cpp-master
 
 %build
-make -j LLAMA_CUDA=1
+make -j GGML_CUDA=1
 
 %install
 mkdir -p %{buildroot}%{_bindir}/
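
The spec change mirrors the Makefile rename. A sketch of exercising it, assuming an rpmbuild environment with the llama.cpp sources staged where the spec expects them:

    rpmbuild -ba .devops/llama-cpp-cuda.srpm.spec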

.devops/llama-server-cuda.Dockerfile

Lines changed: 6 additions & 4 deletions

@@ -6,7 +6,7 @@ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VER
 # Target the CUDA runtime image
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
 
-FROM ${BASE_CUDA_DEV_CONTAINER} as build
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build
 
 # Unless otherwise specified, we make a fat build.
 ARG CUDA_DOCKER_ARCH=all
@@ -21,17 +21,19 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1
 
 RUN make -j$(nproc) llama-server
 
-FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
+FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
 
 RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev libgomp1
+    apt-get install -y libcurl4-openssl-dev libgomp1 curl
 
 COPY --from=build /app/llama-server /llama-server
 
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
 ENTRYPOINT [ "/llama-server" ]

.devops/llama-server-intel.Dockerfile

Lines changed: 11 additions & 8 deletions

@@ -1,29 +1,32 @@
 ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
 
-FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build
 
-ARG LLAMA_SYCL_F16=OFF
+ARG GGML_SYCL_F16=OFF
 RUN apt-get update && \
     apt-get install -y git libcurl4-openssl-dev
 
 WORKDIR /app
 
 COPY . .
 
-RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
-        echo "LLAMA_SYCL_F16 is set" && \
-        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+        echo "GGML_SYCL_F16 is set" && \
+        export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
-    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
+    echo "Building with dynamic libs" && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
     cmake --build build --config Release --target llama-server
 
-FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime
 
 RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev
+    apt-get install -y libcurl4-openssl-dev curl
 
 COPY --from=build /app/build/bin/llama-server /llama-server
 
 ENV LC_ALL=C.utf8
 
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
 ENTRYPOINT [ "/llama-server" ]

.devops/llama-server-rocm.Dockerfile

Lines changed: 5 additions & 3 deletions

@@ -6,7 +6,7 @@ ARG ROCM_VERSION=5.6
 # Target the CUDA build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete
 
-FROM ${BASE_ROCM_DEV_CONTAINER} as build
+FROM ${BASE_ROCM_DEV_CONTAINER} AS build
 
 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
@@ -36,15 +36,17 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
 
 # Enable cURL
 ENV LLAMA_CURL=1
 RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev
+    apt-get install -y libcurl4-openssl-dev curl
 
 RUN make -j$(nproc) llama-server
 
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
 ENTRYPOINT [ "/app/llama-server" ]
.devops/llama-server-vulkan.Dockerfile

Lines changed: 6 additions & 8 deletions

@@ -1,24 +1,20 @@
 ARG UBUNTU_VERSION=jammy
 
-FROM ubuntu:$UBUNTU_VERSION as build
+FROM ubuntu:$UBUNTU_VERSION AS build
 
 # Install build tools
 RUN apt update && apt install -y git build-essential cmake wget
 
-# Install Vulkan SDK
+# Install Vulkan SDK and cURL
 RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
     wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
     apt update -y && \
-    apt-get install -y vulkan-sdk
-
-# Install cURL
-RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev
+    apt-get install -y vulkan-sdk libcurl4-openssl-dev curl
 
 # Build it
 WORKDIR /app
 COPY . .
-RUN cmake -B build -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
+RUN cmake -B build -DGGML_VULKAN=1 -DLLAMA_CURL=1 && \
     cmake --build build --config Release --target llama-server
 
 # Clean up
@@ -28,4 +24,6 @@ RUN cp /app/build/bin/llama-server /llama-server && \
 
 ENV LC_ALL=C.utf8
 
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
 ENTRYPOINT [ "/llama-server" ]
