
Commit 56e659a

fix perplexity after c-api refactor (#390)
* preallocate a buffer of fitting size for tokenization (utils.cpp)
* don't create a new std::string (especially here, where it's usually large)
1 parent 40ea807

File tree

2 files changed: +4 −2 lines

main.cpp

Lines changed: 1 addition & 1 deletion
@@ -85,7 +85,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
     // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
     // Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
     // Output: `perplexity: 13.5106 [114/114]`
-    auto tokens = ::llama_tokenize(ctx, params.prompt.c_str(), true);
+    auto tokens = ::llama_tokenize(ctx, params.prompt, true);
 
     int count = 0;
     double nll = 0.0;
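This change removes a hidden copy: the utils.cpp wrapper takes the text by `const std::string &`, so passing `params.prompt.c_str()` forced construction of a temporary `std::string` from the whole prompt (the full wikitext test set in a perplexity run). A minimal standalone sketch of the effect; `take_text` is a hypothetical stand-in for the wrapper:

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-in for the utils.cpp wrapper: like ::llama_tokenize,
// it takes the text by const reference to std::string.
static void take_text(const std::string & text) {
    std::cout << "got " << text.size() << " chars\n";
}

int main() {
    const std::string prompt(1 << 20, 'x'); // ~1 MiB prompt, as in a perplexity run

    take_text(prompt);         // binds the reference directly: no copy
    take_text(prompt.c_str()); // converts back through a temporary std::string:
                               // a strlen walk plus a full 1 MiB copy per call
}
```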

utils.cpp

Lines changed: 3 additions & 1 deletion
@@ -146,8 +146,10 @@ std::string gpt_random_prompt(std::mt19937 & rng) {
 
 // TODO: not great allocating this every time
 std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
-    std::vector<llama_token> res(8096);
+    // initialize to prompt number of chars, since n_tokens <= n_prompt_chars
+    std::vector<llama_token> res(text.size() + (int)add_bos);
     int n = llama_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos);
+    assert(n >= 0);
     res.resize(n);
 
     return res;
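This change replaces the fixed 8096-token buffer, which a long prompt like the wikitext file could overflow, with an exact upper bound: a tokenizer that consumes at least one input character per token can never emit more than `text.size()` tokens, plus one for the optional BOS. A minimal sketch of the same pattern under that assumption; `toy_tokenize` is a hypothetical stand-in with the C API's shape (it returns the token count, or a negative value when the buffer is too small, which is what the added `assert(n >= 0)` guards against):

```cpp
#include <cassert>
#include <string>
#include <vector>

using toy_token = int;

// Hypothetical stand-in with the shape of the C-API tokenizer: writes at
// most n_max tokens into `out` and returns how many were written, or a
// negative value if the buffer was too small. Here: one token per
// whitespace-separated word, so n_tokens <= n_chars always holds.
static int toy_tokenize(const char * text, toy_token * out, int n_max, bool add_bos) {
    int n = 0;
    if (add_bos) {
        if (n >= n_max) return -1;
        out[n++] = 1; // dummy BOS id
    }
    for (const char * p = text; *p; ) {
        while (*p == ' ') p++;        // skip separators
        if (!*p) break;
        if (n >= n_max) return -1;    // buffer too small
        out[n++] = 2;                 // dummy word-token id
        while (*p && *p != ' ') p++;  // consume the word
    }
    return n;
}

static std::vector<toy_token> tokenize(const std::string & text, bool add_bos) {
    // One allocation, no retry loop: every token consumes at least one
    // input character, so text.size() + add_bos is a safe upper bound.
    std::vector<toy_token> res(text.size() + (int)add_bos);
    const int n = toy_tokenize(text.c_str(), res.data(), (int)res.size(), add_bos);
    assert(n >= 0); // fires only if the upper-bound reasoning above is wrong
    res.resize(n);
    return res;
}

int main() {
    const auto toks = tokenize("fix perplexity after c-api refactor", true);
    assert(toks.size() == 6); // BOS + 5 words
    return 0;
}
```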
