Commit 73d5f90
ci : update LLAMA_ -> GGML_ prefix
ggml-ci
1 parent 5b1490a commit 73d5f90

22 files changed: +156, -159 lines
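The rename only touches the ggml-level build options (backend, ISA, and linking toggles); llama.cpp-level options such as LLAMA_BUILD_SERVER, LLAMA_CURL, and LLAMA_FATAL_WARNINGS keep the LLAMA_ prefix. A minimal before/after sketch of a CUDA configure step, assembled from option names that appear in this diff (the combined command itself is illustrative, not part of the commit):

    # before this commit (old option names)
    cmake -B build -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON

    # after this commit (ggml backend options use the GGML_ prefix)
    cmake -B build -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON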

.devops/nix/package.nix

Lines changed: 9 additions & 9 deletions
@@ -205,17 +205,17 @@ effectiveStdenv.mkDerivation (
 
       cmakeFlags =
         [
-          (cmakeBool "LLAMA_NATIVE" false)
+          (cmakeBool "GGML_NATIVE" false)
           (cmakeBool "LLAMA_BUILD_SERVER" true)
           (cmakeBool "BUILD_SHARED_LIBS" (!enableStatic))
           (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
-          (cmakeBool "LLAMA_BLAS" useBlas)
-          (cmakeBool "LLAMA_CLBLAST" useOpenCL)
-          (cmakeBool "LLAMA_CUDA" useCuda)
-          (cmakeBool "LLAMA_HIPBLAS" useRocm)
-          (cmakeBool "LLAMA_METAL" useMetalKit)
-          (cmakeBool "LLAMA_VULKAN" useVulkan)
-          (cmakeBool "LLAMA_STATIC" enableStatic)
+          (cmakeBool "GGML_BLAS" useBlas)
+          (cmakeBool "GGML_CLBLAST" useOpenCL)
+          (cmakeBool "GGML_CUDA" useCuda)
+          (cmakeBool "GGML_HIPBLAS" useRocm)
+          (cmakeBool "GGML_METAL" useMetalKit)
+          (cmakeBool "GGML_VULKAN" useVulkan)
+          (cmakeBool "GGML_STATIC" enableStatic)
         ]
         ++ optionals useCuda [
           (
@@ -231,7 +231,7 @@ effectiveStdenv.mkDerivation (
         ]
         ++ optionals useMetalKit [
           (lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
-          (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" (!precompileMetalShaders))
+          (cmakeBool "GGML_METAL_EMBED_LIBRARY" (!precompileMetalShaders))
         ];
 
       # Environment variables needed for ROCm

.github/labeler.yml

Lines changed: 14 additions & 14 deletions
@@ -2,31 +2,31 @@
 Kompute:
   - changed-files:
       - any-glob-to-any-file:
-          - ggml-kompute.h
-          - ggml-kompute.cpp
+          - ggml/src/ggml-kompute.h
+          - ggml/src/ggml-kompute.cpp
           - README-kompute.md
 Apple Metal:
   - changed-files:
       - any-glob-to-any-file:
-          - ggml-metal.h
-          - ggml-metal.cpp
+          - ggml/src/ggml-metal.h
+          - ggml/src/ggml-metal.cpp
           - README-metal.md
 SYCL:
   - changed-files:
       - any-glob-to-any-file:
-          - ggml-sycl.h
-          - ggml-sycl.cpp
+          - ggml/src/ggml-sycl.h
+          - ggml/src/ggml-sycl.cpp
           - README-sycl.md
 Nvidia GPU:
   - changed-files:
       - any-glob-to-any-file:
-          - ggml-cuda.h
-          - ggml-cuda/**
+          - ggml/src/ggml-cuda.h
+          - ggml/src/ggml-cuda/**
 Vulkan:
   - changed-files:
       - any-glob-to-any-file:
-          - ggml_vk_generate_shaders.py
-          - ggml-vulkan*
+          - ggml/ggml_vk_generate_shaders.py
+          - ggml/src/ggml-vulkan*
 documentation:
   - changed-files:
       - any-glob-to-any-file:
@@ -73,10 +73,10 @@ server:
 ggml:
   - changed-files:
       - any-glob-to-any-file:
-          - ggml.c
-          - ggml.h
-          - ggml-*.c
-          - ggml-*.h
+          - ggml/include/ggml*.h
+          - ggml/src/ggml*.c
+          - ggml/src/ggml*.cpp
+          - ggml/src/ggml*.h
           - ggml-cuda/**
 nix:
   - changed-files:

.github/workflows/bench.yml

Lines changed: 1 addition & 1 deletion
@@ -109,7 +109,7 @@ jobs:
         run: |
           set -eux
           cmake -B build \
-            -DLLAMA_NATIVE=OFF \
+            -DGGML_NATIVE=OFF \
             -DLLAMA_BUILD_SERVER=ON \
             -DLLAMA_CURL=ON \
             -DLLAMA_CUBLAS=ON \

.github/workflows/build.yml

Lines changed: 32 additions & 32 deletions
@@ -47,7 +47,7 @@ jobs:
           sysctl -a
           mkdir build
           cd build
-          cmake -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL_EMBED_LIBRARY=ON -DLLAMA_CURL=ON ..
+          cmake -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL_EMBED_LIBRARY=ON -DLLAMA_CURL=ON ..
           cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)

       - name: Test
@@ -105,7 +105,7 @@ jobs:
           sysctl -a
           # Metal is disabled due to intermittent failures with Github runners not having a GPU:
           # https://github.com/ggerganov/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
-          cmake -B build -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL=OFF -DLLAMA_CURL=ON
+          cmake -B build -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL=OFF -DLLAMA_CURL=ON
           cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)

       - name: Test
@@ -305,7 +305,7 @@ jobs:
         run: |
           mkdir build
           cd build
-          cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DLLAMA_OPENMP=OFF
+          cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DGGML_OPENMP=OFF
           cmake --build . --config ${{ matrix.build_type }} -j $(nproc)

       - name: Test
@@ -335,7 +335,7 @@ jobs:
         run: |
           mkdir build
           cd build
-          cmake -DLLAMA_RPC=ON ..
+          cmake -DGGML_RPC=ON ..
           cmake --build . --config Release -j $(nproc)

       - name: Test
@@ -363,7 +363,7 @@ jobs:
         run: |
           mkdir build
           cd build
-          cmake -DLLAMA_VULKAN=ON ..
+          cmake -DGGML_VULKAN=ON ..
           cmake --build . --config Release -j $(nproc)

   ubuntu-22-cmake-hip:
@@ -384,13 +384,13 @@ jobs:
       - name: Build with native CMake HIP support
         id: cmake_build
         run: |
-          cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DLLAMA_HIPBLAS=ON
+          cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DGGML_HIPBLAS=ON
           cmake --build build --config Release -j $(nproc)

       - name: Build with legacy HIP support
         id: cmake_build_legacy_hip
         run: |
-          cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DLLAMA_HIPBLAS=ON
+          cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DGGML_HIPBLAS=ON
           cmake --build build2 --config Release -j $(nproc)

   ubuntu-22-cmake-sycl:
@@ -431,7 +431,7 @@ jobs:
           source /opt/intel/oneapi/setvars.sh
           mkdir build
           cd build
-          cmake -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
+          cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
           cmake --build . --config Release -j $(nproc)

   ubuntu-22-cmake-sycl-fp16:
@@ -472,10 +472,10 @@ jobs:
           source /opt/intel/oneapi/setvars.sh
           mkdir build
           cd build
-          cmake -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON ..
+          cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON ..
           cmake --build . --config Release -j $(nproc)

-  # TODO: build with LLAMA_NO_METAL because test-backend-ops fail on "Apple Paravirtual device" and I don't know
+  # TODO: build with GGML_NO_METAL because test-backend-ops fail on "Apple Paravirtual device" and I don't know
   # how to debug it.
   # ref: https://github.com/ggerganov/llama.cpp/actions/runs/7131777249/job/19420981052#step:5:1124
   macOS-latest-make:
@@ -497,15 +497,15 @@ jobs:
         env:
           LLAMA_FATAL_WARNINGS: 1
         run: |
-          LLAMA_NO_METAL=1 make -j $(sysctl -n hw.logicalcpu)
+          GGML_NO_METAL=1 make -j $(sysctl -n hw.logicalcpu)

       - name: Test
         id: make_test
         run: |
-          LLAMA_NO_METAL=1 make tests -j $(sysctl -n hw.logicalcpu)
-          LLAMA_NO_METAL=1 make test -j $(sysctl -n hw.logicalcpu)
+          GGML_NO_METAL=1 make tests -j $(sysctl -n hw.logicalcpu)
+          GGML_NO_METAL=1 make test -j $(sysctl -n hw.logicalcpu)

-  # TODO: build with LLAMA_METAL=OFF because test-backend-ops fail on "Apple Paravirtual device" and I don't know
+  # TODO: build with GGML_METAL=OFF because test-backend-ops fail on "Apple Paravirtual device" and I don't know
   # how to debug it.
   # ref: https://github.com/ggerganov/llama.cpp/actions/runs/7132125951/job/19422043567?pr=4359#step:5:6584
   # would be great if we fix these
@@ -529,7 +529,7 @@ jobs:
           sysctl -a
           mkdir build
           cd build
-          cmake -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL=OFF ..
+          cmake -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL=OFF ..
           cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)

       - name: Test
@@ -559,7 +559,7 @@ jobs:
           mkdir build
           cd build
           cmake -G Xcode .. \
-            -DLLAMA_METAL_EMBED_LIBRARY=ON \
+            -DGGML_METAL_EMBED_LIBRARY=ON \
             -DLLAMA_BUILD_EXAMPLES=OFF \
             -DLLAMA_BUILD_TESTS=OFF \
             -DLLAMA_BUILD_SERVER=OFF \
@@ -588,7 +588,7 @@ jobs:
           mkdir build
           cd build
           cmake -G Xcode .. \
-            -DLLAMA_METAL_EMBED_LIBRARY=ON \
+            -DGGML_METAL_EMBED_LIBRARY=ON \
             -DLLAMA_BUILD_EXAMPLES=OFF \
             -DLLAMA_BUILD_TESTS=OFF \
             -DLLAMA_BUILD_SERVER=OFF \
@@ -662,7 +662,7 @@ jobs:
       - name: Build using make w/ OpenBLAS
         shell: msys2 {0}
         run: |
-          make LLAMA_OPENBLAS=1 -j $(nproc)
+          make GGML_OPENBLAS=1 -j $(nproc)

       - name: Build using CMake
         shell: msys2 {0}
@@ -678,7 +678,7 @@ jobs:
       - name: Build using CMake w/ OpenBLAS
         shell: msys2 {0}
         run: |
-          cmake -B build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS
+          cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
           cmake --build build --config ${{ matrix.build }} -j $(nproc)

   windows-latest-cmake:
@@ -693,25 +693,25 @@ jobs:
       matrix:
         include:
           - build: 'rpc-x64'
-            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_RPC=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=ON'
           - build: 'noavx-x64'
-            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DBUILD_SHARED_LIBS=ON'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF -DBUILD_SHARED_LIBS=ON'
           - build: 'avx2-x64'
-            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
           - build: 'avx-x64'
-            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX2=OFF -DBUILD_SHARED_LIBS=ON'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_AVX2=OFF -DBUILD_SHARED_LIBS=ON'
           - build: 'avx512-x64'
-            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_AVX512=ON -DBUILD_SHARED_LIBS=ON'
           - build: 'openblas-x64'
-            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_BLAS=ON -DBUILD_SHARED_LIBS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
           - build: 'kompute-x64'
-            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON'
           - build: 'vulkan-x64'
-            defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_VULKAN=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_VULKAN=ON -DBUILD_SHARED_LIBS=ON'
           - build: 'llvm-arm64'
-            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
           - build: 'msvc-arm64'
-            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'
+            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON'

     steps:
       - name: Clone
@@ -724,7 +724,7 @@ jobs:
         id: clone_kompute
         if: ${{ matrix.build == 'kompute-x64' }}
         run: |
-          git submodule update --init kompute
+          git submodule update --init ggml/src/kompute

       - name: Download OpenBLAS
         id: get_openblas
@@ -854,7 +854,7 @@ jobs:
         run: |
           mkdir build
           cd build
-          cmake .. -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=ON
+          cmake .. -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DBUILD_SHARED_LIBS=ON
           cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS}

       - name: Determine tag name
@@ -987,7 +987,7 @@ jobs:
         run: |
           $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
           $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
-          cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DLLAMA_HIPBLAS=ON
+          cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DGGML_HIPBLAS=ON
           cmake --build build --config Release

   ios-xcode-build:

.github/workflows/server.yml

Lines changed: 3 additions & 3 deletions
@@ -92,20 +92,20 @@ jobs:
         if: ${{ matrix.sanitizer == 'THREAD' }}
         run: |
           cmake -B build \
-            -DLLAMA_NATIVE=OFF \
+            -DGGML_NATIVE=OFF \
             -DLLAMA_BUILD_SERVER=ON \
             -DLLAMA_CURL=ON \
             -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
             -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \
-            -DLLAMA_OPENMP=OFF ;
+            -DGGML_OPENMP=OFF ;
           cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server

       - name: Build
         id: cmake_build
         if: ${{ matrix.sanitizer != 'THREAD' }}
         run: |
           cmake -B build \
-            -DLLAMA_NATIVE=OFF \
+            -DGGML_NATIVE=OFF \
             -DLLAMA_BUILD_SERVER=ON \
             -DLLAMA_CURL=ON \
             -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \

CMakePresets.json

Lines changed: 2 additions & 2 deletions
@@ -19,14 +19,14 @@
       "cacheVariables": {
         "CMAKE_EXPORT_COMPILE_COMMANDS": "ON",
         "CMAKE_CXX_COMPILER": "icx",
-        "LLAMA_SYCL": "ON",
+        "GGML_SYCL": "ON",
         "CMAKE_INSTALL_RPATH": "$ORIGIN;$ORIGIN/.."
       }
     },
     { "name": "debug", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "Debug" } },
     { "name": "release", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "Release" } },
     { "name": "reldbg", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "RelWithDebInfo" } },
-    { "name": "static", "hidden": true, "cacheVariables": { "LLAMA_STATIC": "ON" } },
+    { "name": "static", "hidden": true, "cacheVariables": { "GGML_STATIC": "ON" } },

     {
       "name": "arm64-windows-msvc", "hidden": true,

Package.swift

Lines changed: 9 additions & 12 deletions
@@ -3,14 +3,13 @@
 import PackageDescription

 var sources = [
-    "ggml.c",
-    "sgemm.cpp",
-    "llama.cpp",
-    "unicode.cpp",
-    "unicode-data.cpp",
-    "ggml-alloc.c",
-    "ggml-backend.c",
-    "ggml-quants.c",
+    "src/llama.cpp",
+    "src/unicode.cpp",
+    "src/unicode-data.cpp",
+    "ggml/src/ggml.c",
+    "ggml/src/ggml-alloc.c",
+    "ggml/src/ggml-backend.c",
+    "ggml/src/ggml-quants.c",
 ]

 var resources: [Resource] = []
@@ -26,8 +25,8 @@ var cSettings: [CSetting] = [
 ]

 #if canImport(Darwin)
-sources.append("ggml-metal.m")
-resources.append(.process("ggml-metal.metal"))
+sources.append("ggml/src/ggml-metal.m")
+resources.append(.process("ggml/src/ggml-metal.metal"))
 linkerSettings.append(.linkedFramework("Accelerate"))
 cSettings.append(
     contentsOf: [
@@ -63,8 +62,6 @@ let package = Package(
                 "models",
                 "tests",
                 "CMakeLists.txt",
-                "ggml-cuda.cu",
-                "ggml-cuda.h",
                 "Makefile"
             ],
         sources: sources,
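Only the source and exclude paths in the manifest change here, so downstream consumers of the Swift package should build exactly as before; a quick sanity check (hedged, assuming the Swift toolchain is installed) is simply:

    swift build -c release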
