commit 6f7daba (1 parent: d9791bb)
tests/test-tokenizer-0.cpp
@@ -14,6 +14,8 @@ static const std::map<std::string, std::vector<llama_token>> & k_tests()
         { " this is 🦙.cpp", { 1, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
         { "w048 7tuijk dsdfhu", { 1, 29893, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
         { "нещо на Български", { 1, 821, 4851, 665, 1386, 29713, 1305, }, },
+        { "<🦙>test extra_id_1 test", { 1, 32003, 1688, 29871, 32001, 259, 1243, }, },
+        { "<🦙>test extra_id_100 test", { 1, 32003, 1688, 29871, 32002, 1243, }, },
     };
     return _k_tests;
@@ -46,6 +48,10 @@ int main(int argc, char **argv) {
         return 1;
     }

+    llama_add_special_token(model, "extra_id_1", 32001);
+    llama_add_special_token(model, "extra_id_100", 32002);
+    llama_add_special_token(model, "<🦙>", 32003);
+
     ctx = llama_new_context_with_model(model, lparams);

     if (ctx == NULL) {
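
For context, a minimal sketch of how the newly registered special tokens are expected to surface in the test. It assumes the llama_tokenize call style used elsewhere in test-tokenizer-0.cpp at this point in time (context, input string, output buffer, buffer capacity, add-BOS flag); check_special_token_tokenization is a hypothetical helper, not part of the patch, and llama_add_special_token is taken to behave as proposed here, mapping "extra_id_1" to 32001, "extra_id_100" to 32002, and "<🦙>" to 32003.

#include <string>
#include <vector>

#include "llama.h"

// Hypothetical helper (not part of the patch): tokenize one of the new test
// strings and compare against the expected ids from k_tests(). With the
// special tokens registered above, "<🦙>" should come back as the single
// token 32003 and "extra_id_1" as 32001 instead of being split into pieces.
static bool check_special_token_tokenization(llama_context * ctx) {
    const std::string text = "<🦙>test extra_id_1 test";
    const std::vector<llama_token> expected = { 1, 32003, 1688, 29871, 32001, 259, 1243, };

    std::vector<llama_token> res(text.size());
    // Assumed signature: llama_tokenize(ctx, text, out, out_capacity, add_bos),
    // returning the number of tokens written (negative if the buffer is too small).
    const int n = llama_tokenize(ctx, text.c_str(), res.data(), int(res.size()), true);
    if (n < 0) {
        return false;
    }
    res.resize(n);

    return res == expected;
}

In the test itself, main() performs this tokenize-and-compare step for every entry of k_tests(), so the two new entries are exercised automatically once the registrations above are in place.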