Skip to content

Commit d7e852c

Browse files
jaime-m-p and ggerganov authored
Tokenizer SPM fixes for phi-3 and llama-spm (bugfix) (#7425)
* Update brute force test: add_special * Update brute force test: default values for add_bos_token and add_eos_token * Enable rtrim when pre-inserting BOS Co-authored-by: Georgi Gerganov <[email protected]> * Revert "server : fix test regexes"
1 parent 917dc8c commit d7e852c

File tree

5 files changed

+27
-22
lines changed

5 files changed

+27
-22
lines changed

convert-hf-to-gguf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1749,7 +1749,7 @@ def set_vocab(self):
17491749
token_id = int(token_id)
17501750
token = foken_data["content"].encode("utf-8")
17511751
if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
1752-
assert(tokens[token_id] == token)
1752+
assert tokens[token_id] == token
17531753
tokens[token_id] = token
17541754
scores[token_id] = -1000.0
17551755
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
@@ -1765,7 +1765,7 @@ def set_vocab(self):
17651765
token_id = int(foken_data["id"])
17661766
token = foken_data["content"].encode("utf-8")
17671767
if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
1768-
assert(tokens[token_id] == token)
1768+
assert tokens[token_id] == token
17691769
tokens[token_id] = token
17701770
scores[token_id] = -1000.0
17711771
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

examples/server/tests/features/server.feature

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,8 @@ Feature: llama.cpp server
3737

3838
Examples: Prompts
3939
| prompt | n_predict | re_content | n_prompt | n_predicted | truncated |
40-
| I believe the meaning of life is | 8 | (read\|going\|pretty)+ | 18 | 8 | not |
41-
| Write a joke about AI from a very long prompt which will not be truncated | 256 | (princesses\|everyone\|kids\|Anna\|forest)+ | 45 | 64 | not |
40+
| I believe the meaning of life is | 8 | (read\|going)+ | 18 | 8 | not |
41+
| Write a joke about AI from a very long prompt which will not be truncated | 256 | (princesses\|everyone\|kids\|Anna\|forest)+ | 46 | 64 | not |
4242

4343
Scenario: Completion prompt truncated
4444
Given a prompt:
@@ -67,8 +67,8 @@ Feature: llama.cpp server
6767

6868
Examples: Prompts
6969
| model | system_prompt | user_prompt | max_tokens | re_content | n_prompt | n_predicted | enable_streaming | truncated |
70-
| llama-2 | Book | What is the best book | 8 | (Here\|what)+ | 76 | 8 | disabled | not |
71-
| codellama70b | You are a coding assistant. | Write the fibonacci function in c++. | 128 | (thanks\|happy\|bird\|fireplace)+ | -1 | 64 | enabled | |
70+
| llama-2 | Book | What is the best book | 8 | (Here\|what)+ | 77 | 8 | disabled | not |
71+
| codellama70b | You are a coding assistant. | Write the fibonacci function in c++. | 128 | (thanks\|happy\|bird\|Annabyear)+ | -1 | 64 | enabled | |
7272

7373

7474
Scenario Outline: OAI Compatibility w/ response format
@@ -84,7 +84,7 @@ Feature: llama.cpp server
8484
| response_format | n_predicted | re_content |
8585
| {"type": "json_object", "schema": {"const": "42"}} | 5 | "42" |
8686
| {"type": "json_object", "schema": {"items": [{"type": "integer"}]}} | 10 | \[ -300 \] |
87-
| {"type": "json_object"} | 10 | \{ " Saragine. |
87+
| {"type": "json_object"} | 10 | \{ " Jacky. |
8888

8989

9090
Scenario: Tokenize / Detokenize

examples/server/tests/features/slotsave.feature

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ Feature: llama.cpp server slot management
2626
# Since we have cache, this should only process the last tokens
2727
Given a user prompt "What is the capital of Germany?"
2828
And a completion request with no api error
29-
Then 24 tokens are predicted matching (Thank|special|Lily)
29+
Then 24 tokens are predicted matching (Thank|special)
3030
And 7 prompt tokens are processed
3131
# Loading the original cache into slot 0,
3232
# we should only be processing 1 prompt token and get the same output
@@ -41,7 +41,7 @@ Feature: llama.cpp server slot management
4141
Given a user prompt "What is the capital of Germany?"
4242
And using slot id 1
4343
And a completion request with no api error
44-
Then 24 tokens are predicted matching (Thank|special|Lily)
44+
Then 24 tokens are predicted matching (Thank|special)
4545
And 1 prompt tokens are processed
4646

4747
Scenario: Erase Slot

llama.cpp

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12498,15 +12498,16 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
1249812498
// tokenizer.encode('', add_special_tokens=True) returns [1]
1249912499
// tokenizer.encode('', add_special_tokens=False) returns []
1250012500

12501+
static const bool rtrim = true; //TODO: as param
12502+
bool is_prev_special = false;
12503+
bool special_token_rtrim = false;
12504+
1250112505
if (add_special && vocab.special_add_bos != 0) {
1250212506
GGML_ASSERT(vocab.special_bos_id != -1);
1250312507
output.push_back(vocab.special_bos_id);
12508+
is_prev_special = true;
1250412509
}
1250512510

12506-
static const bool rtrim = true; //TODO: as param
12507-
bool is_prev_special = false;
12508-
bool special_token_rtrim = false;
12509-
1251012511
for (const auto & fragment : fragment_buffer) {
1251112512
if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1251212513
// without adding this leading whitespace, we do not get the same results as the original tokenizer

tests/test-tokenizer-random.py

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -154,19 +154,22 @@ def generator_custom_text_edge_cases() -> Iterator[str]:
154154
'\uFEFF//', # unicode_ranges_control, 0xFEFF (BOM)
155155
'Cửa Việt', # llama-3, ignore_merges = true
156156
'<s>a', # Phi-3 fail
157-
'<unk><|endoftext|><s>' # Phi-3 fail
157+
'<unk><|endoftext|><s>', # Phi-3 fail
158158
'a\na', # TODO: Bert fail
159159
]
160160

161161

162-
def generator_random_special_tokens(special_tokens:list[str], iterations=100) -> Iterator[str]:
163-
special_tokens = set(special_tokens)
162+
def generator_random_special_tokens(tokenizer, iterations=100) -> Iterator[str]:
163+
special_tokens = set(tokenizer.all_special_tokens)
164164
special_tokens.update([" ", "\n", "\t", "-", "!", "one", "1", "<s>", "</s>"])
165165
special_tokens = list(sorted(special_tokens))
166166
rand = random.Random()
167167
for m in range(iterations):
168168
rand.seed(m)
169169
words = rand.choices(special_tokens, k=500)
170+
if tokenizer.add_bos_token: # skip spam warning of double BOS
171+
while words and words[0] == tokenizer.bos_token:
172+
words.pop(0)
170173
yield "".join(words)
171174

172175

@@ -290,18 +293,19 @@ def main(argv: list[str] = None):
290293
model = LibLlamaModel(LibLlama(), args.vocab_file, mparams=dict(vocab_only=True), cparams=dict(n_ctx=4096))
291294
tokenizer = AutoTokenizer.from_pretrained(args.dir_tokenizer)
292295

293-
def func_tokenize2(text: str):
294-
return tokenizer.encode(text, add_special_tokens=False)
295-
296-
parse_special = all(len(func_tokenize2(t)) == 1 for t in tokenizer.all_special_tokens)
296+
tokenizer.add_bos_token = getattr(tokenizer, "add_bos_token", True)
297+
tokenizer.add_eos_token = getattr(tokenizer, "add_eos_token", False)
297298

298299
def func_tokenize1(text: str):
299-
return model.tokenize(text, add_special=False, parse_special=parse_special)
300+
return model.tokenize(text, add_special=True, parse_special=True)
301+
302+
def func_tokenize2(text: str):
303+
return tokenizer.encode(text, add_special_tokens=True)
300304

301305
vocab = list(sorted(tokenizer.batch_decode(list(tokenizer.get_vocab().values()), skip_special_tokens=True)))
302306
test_compare_tokenizer(func_tokenize1, func_tokenize2, generator_custom_text())
303307
test_compare_tokenizer(func_tokenize1, func_tokenize2, generator_custom_text_edge_cases())
304-
test_compare_tokenizer(func_tokenize1, func_tokenize2, generator_random_special_tokens(tokenizer.all_special_tokens, 10_000))
308+
test_compare_tokenizer(func_tokenize1, func_tokenize2, generator_random_special_tokens(tokenizer, 10_000))
305309
test_compare_tokenizer(func_tokenize1, func_tokenize2, generator_vocab_words(vocab))
306310
test_compare_tokenizer(func_tokenize1, func_tokenize2, generator_random_chars(10_000))
307311
test_compare_tokenizer(func_tokenize1, func_tokenize2, generator_random_vocab_chars(vocab, 10_000))

0 commit comments

Comments (0)