
Commit 26f606e

Merge branch 'master' into gg/add-tokenizer-test-script
2 parents: 9745cf8 + a2ac89d

29 files changed: +680 −536 lines

.flake8
Lines changed: 2 additions & 1 deletion

@@ -1,3 +1,4 @@
 [flake8]
 max-line-length = 125
-ignore = W503
+ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
+exclude = examples/*,examples/*/**,*/**/__init__.py
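
Note: the newly ignored codes are mostly whitespace and layout conventions. A hypothetical snippet (not from the repo) showing two of them:

xs = [1,2,3]    # E231: missing whitespace after ','
y = xs[1 :]     # E203: whitespace before ':'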

.github/workflows/python-lint.yml
Lines changed: 1 addition & 2 deletions

@@ -20,5 +20,4 @@ jobs:
       - name: flake8 Lint
         uses: py-actions/flake8@v2
         with:
-          ignore: "E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503"
-          exclude: "examples/*,examples/*/**,*/**/__init__.py,convert-hf-to-gguf-update.py"
+          plugins: "flake8-no-print"
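
The per-run ignore/exclude lists move into .flake8 above; in their place the workflow loads the flake8-no-print plugin, which reports NP100 on any print() call. A minimal sketch of the pattern this enforces, with hypothetical names:

import logging

logger = logging.getLogger(__name__)


def save_report(path: str) -> None:
    # print(f"wrote {path}")      # would fail CI with NP100 (print found)
    logger.info("wrote %s", path)  # routed through logging instead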

.pre-commit-config.yaml
Lines changed: 3 additions & 2 deletions

@@ -3,13 +3,14 @@
 exclude: prompts/.*.txt
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v3.2.0
+  rev: v4.6.0
   hooks:
   - id: trailing-whitespace
   - id: end-of-file-fixer
   - id: check-yaml
   - id: check-added-large-files
 - repo: https://github.com/PyCQA/flake8
-  rev: 6.0.0
+  rev: 7.0.0
   hooks:
   - id: flake8
+    additional_dependencies: [flake8-no-print]
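
Note: additional_dependencies installs flake8-no-print into pre-commit's isolated hook environment, so local pre-commit runs apply the same NP100 check as the CI workflow above.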

common/log.h
Lines changed: 2 additions & 2 deletions

@@ -234,7 +234,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
 // INTERNAL, DO NOT USE
 // USE LOG() INSTEAD
 //
-#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER)
+#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER) || defined(__clang__)
 #define LOG_IMPL(str, ...) \
     do { \
         if (LOG_TARGET != nullptr) \
@@ -257,7 +257,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
 // INTERNAL, DO NOT USE
 // USE LOG_TEE() INSTEAD
 //
-#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER)
+#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER) || defined(__clang__)
 #define LOG_TEE_IMPL(str, ...) \
     do { \
         if (LOG_TARGET != nullptr) \
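
The added __clang__ clause matters for clang-cl, which defines _MSC_VER and so previously fell through to the MSVC-specific variants of these macros; it now takes the standard variadic-macro branch.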

convert-hf-to-gguf-update.py
Lines changed: 83 additions & 76 deletions

@@ -21,57 +21,65 @@
 # TODO: automate the update of convert-hf-to-gguf.py
 #
 
+import logging
 import os
 import requests
 import sys
 import json
 
 from hashlib import sha256
 from enum import IntEnum, auto
+from transformers import AutoTokenizer
+
+logger = logging.getLogger("convert-hf-to-gguf-update")
 
 
 class TOKENIZER_TYPE(IntEnum):
     SPM = auto()
     BPE = auto()
     WPM = auto()
 
+
 # TODO: this string has to exercise as much pre-tokenizer functionality as possible
 # will be updated with time - contributions welcome
 chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
 
 if len(sys.argv) == 2:
     token = sys.argv[1]
 else:
-    print("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
+    logger.info("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
     sys.exit(1)
 
 # TODO: add models here, base models preferred
 models = [
-    { "name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
-    { "name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", },
-    { "name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", },
-    { "name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", },
-    { "name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", },
-    { "name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", },
-    { "name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", },
-    { "name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
-    { "name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
-    { "name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
-    { "name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
-]
+    {"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
+    {"name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", },
+    {"name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", },
+    {"name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", },
+    {"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", },
+    {"name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", },
+    {"name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", },
+    {"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
+    {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
+    {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
+    {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
+]
 
 # make directory "models/tokenizers" if it doesn't exist
 if not os.path.exists("models/tokenizers"):
     os.makedirs("models/tokenizers")
 
+
 def download_file_with_auth(url, token, save_path):
     headers = {"Authorization": f"Bearer {token}"}
     response = requests.get(url, headers=headers)
     if response.status_code == 200:
         with open(save_path, 'wb') as f:
             f.write(response.content)
-        print(f"File {save_path} downloaded successfully")
+        logger.info(f"File {save_path} downloaded successfully")
     else:
-        print(f"Failed to download file. Status code: {response.status_code}")
+        logger.info(f"Failed to download file. Status code: {response.status_code}")
+
 
 # download the tokenizer models
 for model in models:
for model in models:
@@ -82,10 +90,10 @@ def download_file_with_auth(url, token, save_path):
     if not os.path.exists(f"models/tokenizers/{name}"):
         os.makedirs(f"models/tokenizers/{name}")
     else:
-        print(f"Directory models/tokenizers/{name} already exists - skipping")
+        logger.info(f"Directory models/tokenizers/{name} already exists - skipping")
         continue
 
-    print(f"Downloading {name} to models/tokenizers/{name}")
+    logger.info(f"Downloading {name} to models/tokenizers/{name}")
 
     url = f"{repo}/raw/main/config.json"
     save_path = f"models/tokenizers/{name}/config.json"
@@ -116,76 +124,76 @@ def download_file_with_auth(url, token, save_path):
         continue
 
     # create the tokenizer
-    from transformers import AutoTokenizer
     tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
 
     chktok = tokenizer.encode(chktxt)
     chkhsh = sha256(str(chktok).encode()).hexdigest()
 
-    print(f"model: {name}")
-    print(f"tokt: {tokt}")
-    print(f"repo: {model['repo']}")
-    print(f"chktok: {chktok}")
-    print(f"chkhsh: {chkhsh}")
+    logger.info(f"model: {name}")
+    logger.info(f"tokt: {tokt}")
+    logger.info(f"repo: {model['repo']}")
+    logger.info(f"chktok: {chktok}")
+    logger.info(f"chkhsh: {chkhsh}")
 
     # print the "pre_tokenizer" content from the tokenizer.json
     with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f:
         cfg = json.load(f)
         pre_tokenizer = cfg["pre_tokenizer"]
-        print("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
+        logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
 
-    print(f"\n")
+    logger.info("")
 
     src_ifs += f"        if chkhsh == \"{chkhsh}\":\n"
     src_ifs += f"            # ref: {model['repo']}\n"
     src_ifs += f"            res = \"{name}\"\n"
 
-    src_func = ""
-    src_func += "    def get_vocab_base_pre(self, tokenizer) -> str:\n"
-    src_func += "        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that\n"
-    src_func += "        # is specific for the BPE pre-tokenizer used by the model\n"
-    src_func += "        # we will use this unique identifier to write a \"tokenizer.ggml.pre\" entry in the GGUF file which we can\n"
-    src_func += "        # use in llama.cpp to implement the same pre-tokenizer\n"
-    src_func += "\n"
-    src_func += f"        chktxt = {repr(chktxt)}\n"
-    src_func += "\n"
-    src_func += "        chktok = tokenizer.encode(chktxt)\n"
-    src_func += "        chkhsh = sha256(str(chktok).encode()).hexdigest()\n"
-    src_func += "\n"
-    src_func += "        print(f\"chktok: {chktok}\")\n"
-    src_func += "        print(f\"chkhsh: {chkhsh}\")\n"
-    src_func += "\n"
-    src_func += "        res = None\n"
-    src_func += "\n"
-    src_func += "        # NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script\n"
-    src_func += "        # or pull the latest version of the model from Huggingface\n"
-    src_func += "        # don't edit the hashes manually!\n"
-    src_func += f"{src_ifs}\n"
-    src_func += "        if res is None:\n"
-    src_func += "            print(\"\\n\")\n"
-    src_func += "            print(\"**************************************************************************************\")\n"
-    src_func += "            print(\"** WARNING: The BPE pre-tokenizer was not recognized!\")\n"
-    src_func += "            print(\"** There are 2 possible reasons for this:\")\n"
-    src_func += "            print(\"** - the model has not been added to convert-hf-to-gguf-update.py yet\")\n"
-    src_func += "            print(\"** - the pre-tokenization config has changed upstream\")\n"
-    src_func += "            print(\"** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.\")\n"
-    src_func += "            print(\"** ref: https://github.com/ggerganov/llama.cpp/pull/6920\")\n"
-    src_func += "            print(\"**\")\n"
-    src_func += "            print(f\"** chkhsh: {chkhsh}\")\n"
-    src_func += "            print(\"**************************************************************************************\")\n"
-    src_func += "            print(\"\\n\")\n"
-    src_func += "            raise NotImplementedError(\"BPE pre-tokenizer was not recognized - update get_vocab_base_pre()\")\n"
-    src_func += "\n"
-    src_func += "        print(f\"tokenizer.ggml.pre: {res}\")\n"
-    src_func += "        print(f\"chkhsh: {chkhsh}\")\n"
-    src_func += "\n"
-    src_func += "        return res\n"
-
-    print(src_func)
-
-    print("\n")
-    print("!!! Copy-paste the function above into convert-hf-to-gguf.py !!!")
-    print("\n")
+    src_func = f"""
+    def get_vocab_base_pre(self, tokenizer) -> str:
+        # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
+        # is specific for the BPE pre-tokenizer used by the model
+        # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
+        # use in llama.cpp to implement the same pre-tokenizer
+
+        chktxt = {repr(chktxt)}
+
+        chktok = tokenizer.encode(chktxt)
+        chkhsh = sha256(str(chktok).encode()).hexdigest()
+
+        print(f"chktok: {{chktok}}")
+        print(f"chkhsh: {{chkhsh}}")
+
+        res = None
+
+        # NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script
+        # or pull the latest version of the model from Huggingface
+        # don't edit the hashes manually!
+{src_ifs}
+        if res is None:
+            print("\\n")
+            print("**************************************************************************************")
+            print("** WARNING: The BPE pre-tokenizer was not recognized!")
+            print("** There are 2 possible reasons for this:")
+            print("** - the model has not been added to convert-hf-to-gguf-update.py yet")
+            print("** - the pre-tokenization config has changed upstream")
+            print("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
+            print("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
+            print("**")
+            print(f"** chkhsh: {{chkhsh}}")
+            print("**************************************************************************************")
+            print("\\n")
+            raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")
+
+        print(f"tokenizer.ggml.pre: {{repr(res)}}")
+        print(f"chkhsh: {{chkhsh}}")
+
+        return res
+"""
+
+    print(src_func)  # noqa: NP100
+
+    logger.info("\n")
+    logger.info("!!! Copy-paste the function above into convert-hf-to-gguf.py !!!")
+    logger.info("\n")
 
 # generate tests for each tokenizer model
 
@@ -253,7 +261,6 @@ def download_file_with_auth(url, token, save_path):
     tokt = model["tokt"]
 
     # create the tokenizer
-    from transformers import AutoTokenizer
     tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
 
     with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f:
@@ -268,15 +275,15 @@ def download_file_with_auth(url, token, save_path):
             f.write(f" {r}")
         f.write("\n")
 
-    print(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*")
+    logger.info(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*")
 
 # generate commands for creating vocab files
 
-print("\nRun the following commands to generate the vocab files for testing:\n")
+logger.info("\nRun the following commands to generate the vocab files for testing:\n")
 
 for model in models:
     name = model["name"]
 
-    print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only")
+    logger.info(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only")
 
-print("\n")
+logger.info("\n")
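
The emitted commands are run once per tokenizer to produce the vocab-only GGUF files that the generated tests consume, e.g. python3 convert-hf-to-gguf.py models/tokenizers/llama-bpe/ --outfile models/ggml-vocab-llama-bpe.gguf --vocab-only (with the name substituted per the loop above).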
