Skip to content

Commit 4cd621c

Browse files
authored
convert : add BPE pre-tokenization for DBRX (#7132)
* Add BPE pre-tokenization for DBRX.
* Add vocab GGUFs.
* Remove test.
* Remove GGUFs.
1 parent 7e0b6a7 commit 4cd621c

File tree

4 files changed

+9
-0
lines changed

4 files changed

+9
-0
lines changed

convert-hf-to-gguf-update.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -68,6 +68,7 @@ class TOKENIZER_TYPE(IntEnum):
6868
{"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
6969
{"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
7070
{"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
71+
{"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
7172
]
7273

7374
# make directory "models/tokenizers" if it doesn't exist

convert-hf-to-gguf.py

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -317,6 +317,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
317317
if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
318318
# ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
319319
res = "olmo"
320+
if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
321+
# ref: https://huggingface.co/databricks/dbrx-instruct
322+
res = "dbrx"
320323

321324
if res is None:
322325
logger.warning("\n")

llama.cpp

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -4394,6 +4394,9 @@ static void llm_load_vocab(
43944394
} else if (
43954395
tokenizer_pre == "olmo") {
43964396
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
4397+
} else if (
4398+
tokenizer_pre == "dbrx") {
4399+
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
43974400
} else {
43984401
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
43994402
}
@@ -12200,6 +12203,7 @@ struct llm_tokenizer_bpe {
1220012203
case LLAMA_VOCAB_TYPE_BPE:
1220112204
switch (vocab.type_pre) {
1220212205
case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
12206+
case LLAMA_VOCAB_PRE_TYPE_DBRX:
1220312207
word_collection = unicode_regex_split(text, {
1220412208
// original regex from tokenizer.json
1220512209
//"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",

llama.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -82,6 +82,7 @@ extern "C" {
8282
LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
8383
LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
8484
LLAMA_VOCAB_PRE_TYPE_OLMO = 10,
85+
LLAMA_VOCAB_PRE_TYPE_DBRX = 11,
8586
};
8687

8788
// note: these values should be synchronized with ggml_rope

0 commit comments

Comments (0)