diff --git a/convert.py b/convert.py
old mode 100755
new mode 100644
index 4ba36f2801891..0428c229f6ddb
--- a/convert.py
+++ b/convert.py
@@ -741,6 +741,8 @@ def add_meta_vocab(self, vocab: Vocab) -> None:
         tokens = []
         scores = []
         toktypes = []
+        # NOTE: `all_tokens` returns the base vocabulary and added tokens
+        # TODO: add special tokens?
         for text, score, toktype in vocab.all_tokens():
             tokens.append(text)
             scores.append(score)
@@ -751,8 +753,6 @@ def add_meta_vocab(self, vocab: Vocab) -> None:
         self.gguf.add_token_scores(scores)
         self.gguf.add_token_types(toktypes)
 
-        # TODO: added / special tokens
-
     def add_tensor_info(self, name: str, tensor: LazyTensor) -> None:
         n_elements = 1
         for dim in tensor.shape:
diff --git a/tests/test-tokenizer-1.cpp b/tests/test-tokenizer-1.cpp
index a8a7e88988260..d8db7cd96eaa4 100644
--- a/tests/test-tokenizer-1.cpp
+++ b/tests/test-tokenizer-1.cpp
@@ -87,8 +87,8 @@ int main(int argc, char **argv) {
                 return 2;
             }
         } else {
-            // TODO: needs access to token types
-            if (0 <= i && i < 259) {
+            llama_token_type type = llama_token_get_type(ctx, i);
+            if (type == LLAMA_TOKEN_TYPE_UNKNOWN || type == LLAMA_TOKEN_TYPE_CONTROL || type == LLAMA_TOKEN_TYPE_BYTE) {
                 fprintf(stderr, "%s : info: token %d is string %s and bpe returns tokens %s\n",
                     __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
             } else {
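
Note on the test change above: the hard-coded `0 <= i && i < 259` range was a stand-in for token-type information (presumably the 3 special tokens plus 256 byte tokens of the LLaMA vocab). With token types now written by convert.py via add_token_types and exposed through llama_token_get_type, the check can be expressed as a small reusable predicate. A minimal sketch, assuming a loaded llama_context and using only the API calls that appear in the diff; the helper name is_special_like is hypothetical:

    #include "llama.h"

    // Hypothetical helper: returns true for tokens the test treats as
    // non-roundtrippable through BPE, i.e. unknown, control, and byte
    // tokens, as reported by the vocab metadata in the GGUF file.
    static bool is_special_like(llama_context * ctx, llama_token id) {
        const llama_token_type type = llama_token_get_type(ctx, id);
        return type == LLAMA_TOKEN_TYPE_UNKNOWN
            || type == LLAMA_TOKEN_TYPE_CONTROL
            || type == LLAMA_TOKEN_TYPE_BYTE;
    }

Reading the type from model metadata rather than a fixed index range keeps the test correct for vocabularies whose special and byte tokens are not laid out in the first 259 slots.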