Skip to content

Commit 9e70cc0

Browse files
authored
Add test for MPT tokenization (#3728)
* Add test for MPT tokenization
* Revert code motion
* Remove unnecessary restriction in test case
* Clarify logic in conversion
1 parent 5a42a5f commit 9e70cc0

File tree

4 files changed

+21
-12
lines changed

4 files changed

+21
-12
lines changed

convert-mpt-hf-to-gguf.py

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -128,15 +128,22 @@ def parse_args() -> argparse.Namespace:
128128
# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py
129129
tokenizer = AutoTokenizer.from_pretrained(dir_model)
130130

131+
added_vocab = tokenizer.get_added_vocab()
131132
reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
132133

133134
for i in range(vocab_size):
134-
tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
135-
scores.append(0.0) # dummy
136-
toktypes.append(gguf.TokenType.NORMAL)
135+
if i not in reverse_vocab:
136+
tokens.append(f"[PAD{i}]")
137+
toktypes.append(gguf.TokenType.USER_DEFINED)
138+
elif reverse_vocab[i] in added_vocab:
139+
# NOTE: wouldn't we like to distinguish CONTROL tokens here?
140+
tokens.append(reverse_vocab[i])
141+
toktypes.append(gguf.TokenType.USER_DEFINED)
142+
else:
143+
tokens.append(reverse_vocab[i])
144+
toktypes.append(gguf.TokenType.NORMAL)
137145

138146
gguf_writer.add_token_list(tokens)
139-
gguf_writer.add_token_scores(scores)
140147
gguf_writer.add_token_types(toktypes)
141148

142149
special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))

llama.cpp

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -975,14 +975,15 @@ static void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
975975
(void) tensor;
976976
}
977977

978-
static std::string llama_token_to_str(const struct llama_context * ctx, llama_token token) {
978+
static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
979979
std::vector<char> result(8, 0);
980980
const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
981981
if (n_tokens < 0) {
982982
result.resize(-n_tokens);
983983
int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
984984
GGML_ASSERT(check == -n_tokens);
985-
} else {
985+
}
986+
else {
986987
result.resize(n_tokens);
987988
}
988989

@@ -1202,10 +1203,10 @@ struct llama_vocab {
12021203
id special_eot_id = 32010;
12031204

12041205
int find_bpe_rank(std::string token_left, std::string token_right) const {
1205-
replace_all(token_left, " ", "\u0120");
1206-
replace_all(token_left, "\n", "\u010A");
1207-
replace_all(token_right, " ", "\u0120");
1208-
replace_all(token_right, "\n", "\u010A");
1206+
GGML_ASSERT(token_left.find(" ") == std::string::npos);
1207+
GGML_ASSERT(token_left.find("\n") == std::string::npos);
1208+
GGML_ASSERT(token_right.find(" ") == std::string::npos);
1209+
GGML_ASSERT(token_right.find("\n") == std::string::npos);
12091210

12101211
auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
12111212
if (it == bpe_ranks.end()) {
@@ -7499,7 +7500,7 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c
74997500

75007501
for (size_t i = 0; i < candidates->size; ++i) {
75017502
const llama_token id = candidates->data[i].id;
7502-
const std::string piece = llama_token_to_str(ctx, id);
7503+
const std::string piece = llama_token_to_piece(ctx, id);
75037504
if (id == eos) {
75047505
if (!allow_eos) {
75057506
candidates->data[i].logit = -INFINITY;
@@ -7711,7 +7712,7 @@ void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar
77117712
GGML_ASSERT(false);
77127713
}
77137714

7714-
const std::string piece = llama_token_to_str(ctx, token);
7715+
const std::string piece = llama_token_to_piece(ctx, token);
77157716

77167717
// Note terminating 0 in decoded string
77177718
const auto decoded = decode_utf8(piece.c_str(), grammar->partial_utf8);

models/ggml-vocab-mpt.gguf

1.69 MB
Binary file not shown.

tests/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ llama_test_executable (test-tokenizer-1-llama test-tokenizer-1-llama.cpp ${CMAKE
3131
llama_build_executable(test-tokenizer-1-bpe.cpp)
3232
llama_test_executable (test-tokenizer-1-falcon test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
3333
llama_test_executable(test-tokenizer-1-aquila test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
34+
llama_test_executable(test-tokenizer-1-mpt test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
3435
llama_build_and_test_executable(test-grammar-parser.cpp)
3536
llama_build_and_test_executable(test-llama-grammar.cpp)
3637
llama_build_and_test_executable(test-grad0.cpp) # SLOW

0 commit comments

Comments (0)