Skip to content

Commit db7a012

Browse files
committed
Merge 'origin/master' into hipblas
2 parents 3677235 + 284685f commit db7a012

24 files changed

+1542
-457
lines changed

.github/workflows/build.yml

Lines changed: 2 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -12,16 +12,14 @@ on:
1212
- master
1313
paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.c', '**/*.cpp']
1414
pull_request:
15-
types: [opened, synchronize, edited, reopened, review_requested, ready_for_review]
15+
types: [opened, synchronize, reopened]
1616
paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.c', '**/*.cpp']
1717

1818
env:
1919
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
2020

2121
jobs:
2222
ubuntu-latest-make:
23-
if: github.event.pull_request.draft == false
24-
2523
runs-on: ubuntu-latest
2624

2725
steps:
@@ -41,8 +39,6 @@ jobs:
4139
make
4240
4341
ubuntu-latest-cmake:
44-
if: github.event.pull_request.draft == false
45-
4642
runs-on: ubuntu-latest
4743

4844
steps:
@@ -71,8 +67,6 @@ jobs:
7167
ctest --verbose
7268
7369
ubuntu-latest-cmake-sanitizer:
74-
if: github.event.pull_request.draft == false
75-
7670
runs-on: ubuntu-latest
7771

7872
continue-on-error: true
@@ -108,8 +102,6 @@ jobs:
108102
ctest --verbose
109103
110104
macOS-latest-make:
111-
if: github.event.pull_request.draft == false
112-
113105
runs-on: macos-latest
114106

115107
steps:
@@ -128,8 +120,6 @@ jobs:
128120
make
129121
130122
macOS-latest-cmake:
131-
if: github.event.pull_request.draft == false
132-
133123
runs-on: macOS-latest
134124

135125
steps:
@@ -157,8 +147,6 @@ jobs:
157147
ctest --verbose
158148
159149
windows-latest-cmake:
160-
if: github.event.pull_request.draft == false
161-
162150
runs-on: windows-latest
163151

164152
strategy:
@@ -169,7 +157,7 @@ jobs:
169157
- build: 'avx'
170158
defines: '-DLLAMA_AVX2=OFF'
171159
- build: 'avx512'
172-
defines: '-DLLAMA_AVX512=ON'
160+
defines: '-DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'
173161

174162
steps:
175163
- name: Clone

CMakeLists.txt

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -232,6 +232,10 @@ endif()
232232

233233
if (MSVC)
234234
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
235+
236+
if (BUILD_SHARED_LIBS)
237+
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
238+
endif()
235239
endif()
236240

237241
if (LLAMA_LTO)
@@ -338,7 +342,8 @@ add_library(ggml OBJECT
338342

339343
target_include_directories(ggml PUBLIC .)
340344
target_compile_features(ggml PUBLIC c_std_11) # don't bump
341-
target_link_libraries(ggml PRIVATE Threads::Threads ${LLAMA_EXTRA_LIBS})
345+
target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
346+
342347
if (BUILD_SHARED_LIBS)
343348
set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
344349
endif()
@@ -351,6 +356,7 @@ add_library(llama
351356
target_include_directories(llama PUBLIC .)
352357
target_compile_features(llama PUBLIC cxx_std_11) # don't bump
353358
target_link_libraries(llama PRIVATE ggml ${LLAMA_EXTRA_LIBS})
359+
354360
if (BUILD_SHARED_LIBS)
355361
set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON)
356362
target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD)

Makefile

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -74,13 +74,17 @@ endif
7474
# feel free to update the Makefile for your architecture and send a pull request or issue
7575
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
7676
# Use all CPU extensions that are available:
77-
CFLAGS += -march=native -mtune=native
77+
CFLAGS += -march=native -mtune=native
7878
CXXFLAGS += -march=native -mtune=native
79+
80+
# Usage AVX-only
81+
#CFLAGS += -mfma -mf16c -mavx
82+
#CXXFLAGS += -mfma -mf16c -mavx
7983
endif
8084
ifneq ($(filter ppc64%,$(UNAME_M)),)
8185
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
8286
ifneq (,$(findstring POWER9,$(POWER9_M)))
83-
CFLAGS += -mcpu=power9
87+
CFLAGS += -mcpu=power9
8488
CXXFLAGS += -mcpu=power9
8589
endif
8690
# Require c++23's std::byteswap for big-endian support.
@@ -101,11 +105,13 @@ ifdef LLAMA_OPENBLAS
101105
LDFLAGS += -lopenblas
102106
endif
103107
ifdef LLAMA_CUBLAS
104-
CFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include
105-
LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64
106-
OBJS += ggml-cuda.o
108+
CFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include
109+
LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64
110+
OBJS += ggml-cuda.o
111+
NVCC = nvcc
112+
NVCCFLAGS = --forward-unknown-to-host-linker -arch=native
107113
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
108-
nvcc -arch=native -c -o $@ $<
114+
$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -c $< -o $@
109115
endif
110116
ifdef LLAMA_HIPBLAS
111117
ROCM_PATH ?= /opt/rocm
@@ -124,8 +130,12 @@ ifdef LLAMA_GPROF
124130
CFLAGS += -pg
125131
CXXFLAGS += -pg
126132
endif
133+
ifdef LLAMA_PERF
134+
CFLAGS += -DGGML_PERF
135+
CXXFLAGS += -DGGML_PERF
136+
endif
127137
ifneq ($(filter aarch64%,$(UNAME_M)),)
128-
CFLAGS += -mcpu=native
138+
CFLAGS += -mcpu=native
129139
CXXFLAGS += -mcpu=native
130140
endif
131141
ifneq ($(filter armv6%,$(UNAME_M)),)

README.md

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -275,18 +275,19 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach.
275275
276276
### Using [GPT4All](https://github.com/nomic-ai/gpt4all)
277277
278-
- Obtain the `gpt4all-lora-quantized.bin` model
279-
- It is distributed in the old `ggml` format, which is now obsoleted
280-
- You have to convert it to the new format using [./convert-gpt4all-to-ggml.py](./convert-gpt4all-to-ggml.py). You may also need to
281-
convert the model from the old format to the new format with [./migrate-ggml-2023-03-30-pr613.py](./migrate-ggml-2023-03-30-pr613.py):
282-
283-
```bash
284-
python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model
285-
python3 migrate-ggml-2023-03-30-pr613.py models/gpt4all-7B/gpt4all-lora-quantized.bin models/gpt4all-7B/gpt4all-lora-quantized-new.bin
286-
```
287-
288-
- You can now use the newly generated `gpt4all-lora-quantized-new.bin` model in exactly the same way as all other models
289-
- The original model is saved in the same folder with a suffix `.orig`
278+
- Obtain the `tokenizer.model` file from LLaMA model and put it to `models`
279+
- Obtain the `added_tokens.json` file from Alpaca model and put it to `models`
280+
- Obtain the `gpt4all-lora-quantized.bin` file from GPT4All model and put it to `models/gpt4all-7B`
281+
- It is distributed in the old `ggml` format which is now obsoleted
282+
- You have to convert it to the new format using `convert.py`:
283+
284+
```bash
285+
python3 convert.py models/gpt4all-7B/gpt4all-lora-quantized.bin
286+
```
287+
288+
- You can now use the newly generated `models/gpt4all-7B/ggml-model-q4_0.bin` model in exactly the same way as all other models
289+
290+
- The newer GPT4All-J model is not yet supported!
290291

291292
### Obtaining and verifying the Facebook LLaMA original model and Stanford Alpaca model data
292293

SHA256SUMS

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,27 @@
11
700df0d3013b703a806d2ae7f1bfb8e59814e3d06ae78be0c66368a50059f33d models/7B/consolidated.00.pth
2+
666a4bb533b303bdaf89e1b6a3b6f93535d868de31d903afdc20983dc526c847 models/7B/ggml-model-f16.bin
3+
fcb7664c2e69776920b526362a243e912f73c36b1ec892eb354bab940f5edb5a models/7B/ggml-model-q4_0.bin
4+
cc061458339a3eb8bcecbf0a825e9924fb7d1a8150f63cd5d091caa99215aafe models/7B/ggml-model-q4_1.bin
5+
1bc7484c24a87612726d756f1761890e7acf5f412e23378577ce50fbe789b5b8 models/7B/ggml-model-q4_2.bin
6+
3429bf198ec771886cf81a574df45245f3ebf04f0ce0956b73ef5d0ab01ff48b models/7B/ggml-model-q4_3.bin
27
7e89e242ddc0dd6f060b43ca219ce8b3e8f08959a72cb3c0855df8bb04d46265 models/7B/params.json
38
745bf4e29a4dd6f411e72976d92b452da1b49168a4f41c951cfcc8051823cf08 models/13B/consolidated.00.pth
49
d5ccbcc465c71c0de439a5aeffebe8344c68a519bce70bc7f9f92654ee567085 models/13B/consolidated.01.pth
10+
2b206e9b21fb1076f11cafc624e2af97c9e48ea09312a0962153acc20d45f808 models/13B/ggml-model-f16.bin
11+
4b69e4d6b6e3275230955997b90407fceca7e5ab3daf2e63a2c9e7270a8e1e3e models/13B/ggml-model-q4_0.bin
12+
d9581b5b88e5622532fe897c9f9b0e67a317d22dd27a6f90fa4ab8c6d23ccdbb models/13B/ggml-model-q4_1.bin
13+
8d55a2077317ec9a928c7851d6a43e08e51f7e9e08360f2a7a7e1deefea3134f models/13B/ggml-model-q4_2.bin
14+
4208cdec9788ffa48dc1a17af2c36a0299f5bf3eb0e2b87889dda7fad591fca3 models/13B/ggml-model-q4_3.bin
515
4ab77bec4d4405ccb66a97b282574c89a94417e3c32e5f68f37e2876fc21322f models/13B/params.json
616
e23294a58552d8cdec5b7e8abb87993b97ea6eced4178ff2697c02472539d067 models/30B/consolidated.00.pth
717
4e077b7136c7ae2302e954860cf64930458d3076fcde9443f4d0e939e95903ff models/30B/consolidated.01.pth
818
24a87f01028cbd3a12de551dcedb712346c0b5cbdeff1454e0ddf2df9b675378 models/30B/consolidated.02.pth
919
1adfcef71420886119544949767f6a56cb6339b4d5fcde755d80fe68b49de93b models/30B/consolidated.03.pth
20+
7e1b524061a9f4b27c22a12d6d2a5bf13b8ebbea73e99f218809351ed9cf7d37 models/30B/ggml-model-f16.bin
21+
7a679908ce31c9d6ae2e38d6059bcd4d0ad3a870cd58cc1c8f7b36f2b2f51c73 models/30B/ggml-model-q4_0.bin
22+
7b75ac615fa369ee593493a7e6ef87542bf0350255db928b22c5a24f6d598bcd models/30B/ggml-model-q4_1.bin
23+
2c82b4954a94a6a284f452f6011c1e4f0d20362c194a0b1eb5737f5fd8a20fb3 models/30B/ggml-model-q4_2.bin
24+
a6188660199dbcb8d5658abe7d89169869e50423494385830d9e6b330ea7fc33 models/30B/ggml-model-q4_3.bin
1025
2c07118ea98d69dbe7810d88520e30288fa994751b337f8fca02b171955f44cb models/30B/params.json
1126
135c563f6b3938114458183afb01adc9a63bef3d8ff7cccc3977e5d3664ecafe models/65B/consolidated.00.pth
1227
9a600b37b19d38c7e43809485f70d17d1dc12206c07efa83bc72bb498a568bde models/65B/consolidated.01.pth
@@ -16,5 +31,10 @@ e7babf7c5606f165a3756f527cb0fedc4f83e67ef1290391e52fb1cce5f26770 models/65B/con
1631
a287c0dfe49081626567c7fe87f74cce5831f58e459b427b5e05567641f47b78 models/65B/consolidated.05.pth
1732
72b4eba67a1a3b18cb67a85b70f8f1640caae9b40033ea943fb166bd80a7b36b models/65B/consolidated.06.pth
1833
d27f5b0677d7ff129ceacd73fd461c4d06910ad7787cf217b249948c3f3bc638 models/65B/consolidated.07.pth
34+
60758f2384d74e423dffddfd020ffed9d3bb186ebc54506f9c4a787d0f5367b0 models/65B/ggml-model-f16.bin
35+
c671fe1bce71499ac732ec999770ebe53ac486623a7891e42c9dfdb6962d2c64 models/65B/ggml-model-q4_0.bin
36+
4743a28aac3e5f32a6e838a815f51d3779de44fbbe251d745251e66c23c5950f models/65B/ggml-model-q4_1.bin
37+
4a145a210c56982389b1ed34387e0590c3e0d7325fa9be4f2284fe4d244a3633 models/65B/ggml-model-q4_2.bin
38+
305e91a4608b4f627b9b8ad5b4af75187d2684254bfd76dcb9db571618ef293c models/65B/ggml-model-q4_3.bin
1939
999ed1659b469ccc2a941714c0a9656fa571d17c9f7c8c7589817ca90edef51b models/65B/params.json
2040
9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 models/tokenizer.model

examples/alpaca.sh

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,4 +7,13 @@
77
cd `dirname $0`
88
cd ..
99

10-
./main -m ./models/ggml-alpaca-7b-q4.bin --color -f ./prompts/alpaca.txt --ctx_size 2048 -n -1 -ins -b 256 --top_k 10000 --temp 0.2 --repeat_penalty 1 -t 7
10+
./main -m ./models/ggml-alpaca-7b-q4.bin \
11+
--color \
12+
-f ./prompts/alpaca.txt \
13+
--ctx_size 2048 \
14+
-n -1 \
15+
-ins -b 256 \
16+
--top_k 10000 \
17+
--temp 0.2 \
18+
--repeat_penalty 1.1 \
19+
-t 7

examples/common.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ struct gpt_params {
2020
int32_t repeat_last_n = 64; // last n tokens to penalize
2121
int32_t n_parts = -1; // amount of model parts (-1 = determine from model dimensions)
2222
int32_t n_ctx = 512; // context size
23-
int32_t n_batch = 8; // batch size for prompt processing
23+
int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
2424
int32_t n_keep = 0; // number of tokens to keep from initial prompt
2525

2626
// sampling parameters

0 commit comments

Comments (0)