|
| 1 | +#include "common.h" |
| 2 | +#include "llama.h" |
| 3 | + |
| 4 | +#include <cstdio> |
| 5 | +#include <string> |
| 6 | +#include <vector> |
| 7 | + |
// Encode a single Unicode codepoint as UTF-8 into `dest`.
// Writes 1-4 bytes and does NOT null-terminate — callers zero-initialize
// their buffer (see the `char buf[5] = {}` in main).
// UTF-16 surrogates 0xD800-0xDFFF are deliberately encoded as well, so the
// scan below covers every value in 0..0x10FFFF.
// Returns 0 on success, 1 if `codepoint` is outside the encodable range.
static int unicode_to_utf8(int codepoint, char *dest) {
    if (codepoint < 0) {
        // a negative value would otherwise match the `< 0x80` branch and
        // emit a garbage byte — reject it like the > 0x10FFFF case
        return 1;
    }
    if (codepoint < 0x80) {
        // 1 byte: 0xxxxxxx
        *dest++ = (char) codepoint;
    } else if (codepoint < 0x800) {
        // 2 bytes: 110xxxxx 10xxxxxx
        *dest++ = (char) (0xC0 | (codepoint >> 6));
        *dest++ = (char) (0x80 | (codepoint & 0x3F));
    } else if (codepoint < 0x10000) {
        // 3 bytes: 1110xxxx 10xxxxxx 10xxxxxx
        // (includes the reserved utf-16 surrogates 0xd800 - 0xdfff for simplicity)
        *dest++ = (char) (0xE0 | (codepoint >> 12));
        *dest++ = (char) (0x80 | ((codepoint >> 6) & 0x3F));
        *dest++ = (char) (0x80 | (codepoint & 0x3F));
    } else if (codepoint < 0x110000) {
        // 4 bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
        *dest++ = (char) (0xF0 | (codepoint >> 18));
        *dest++ = (char) (0x80 | ((codepoint >> 12) & 0x3F));
        *dest++ = (char) (0x80 | ((codepoint >> 6) & 0x3F));
        *dest++ = (char) (0x80 | (codepoint & 0x3F));
    } else {
        return 1;
    }
    return 0;
}
| 26 | + |
| 27 | +int main(int argc, char **argv) { |
| 28 | + if (argc < 2) { |
| 29 | + printf("usage: %s MODEL_PATH\n", argv[0]); |
| 30 | + return 1; |
| 31 | + } |
| 32 | + |
| 33 | + const char *model_path = argv[1]; |
| 34 | + |
| 35 | + llama_backend_init(); |
| 36 | + |
| 37 | + llama_model_params model_params = llama_model_default_params(); |
| 38 | + model_params.vocab_only = true; |
| 39 | + llama_model *model = llama_load_model_from_file(model_path, model_params); |
| 40 | + |
| 41 | + std::vector<llama_token> tokens; |
| 42 | + |
| 43 | + int failed_ascii = 0; |
| 44 | + int ascii_max = 127; |
| 45 | + for (int c = 0; c <= ascii_max; c++) { |
| 46 | + const char prompt[] = {(char)c, '\0'}; |
| 47 | + try { |
| 48 | + tokens = ::llama_tokenize(model, prompt, false, true); |
| 49 | + } catch (...) { |
| 50 | + printf("%#x -> Tokenization failed for char '%c'\n", c, (char)c); |
| 51 | + failed_ascii += 1; |
| 52 | + continue; |
| 53 | + } |
| 54 | + } |
| 55 | + printf("%d/%d 7-bit ascii characters could not be tokenized\n", failed_ascii, ascii_max); |
| 56 | + |
| 57 | + int failed_unicode = 0; |
| 58 | + int utf8_max = 0x10FFFF; |
| 59 | + // Now let's do all potential codepoints |
| 60 | + for (int cp = 0; cp <= utf8_max; cp++) { |
| 61 | + char buf[5] = {}; |
| 62 | + if (unicode_to_utf8(cp, buf)) { |
| 63 | + printf("Impossible to encode codepoint %#x\n", cp); |
| 64 | + continue; |
| 65 | + } |
| 66 | + try { |
| 67 | + tokens = ::llama_tokenize(model, buf, false, true); |
| 68 | + } catch (...) { |
| 69 | + // printf("%#x -> Tokenization failed for codepoint '%s'\n", cp, buf); |
| 70 | + failed_unicode += 1; |
| 71 | + continue; |
| 72 | + } |
| 73 | + } |
| 74 | + printf("%d/%d potential unicode codepoints not tokenized\n", failed_unicode, |
| 75 | + utf8_max); |
| 76 | + |
| 77 | + return (failed_ascii != 0 || failed_unicode != 0); |
| 78 | +} |
0 commit comments