Skip to content

Commit dfa058e

Browse files
committed
examples : no longer manually add leading space when tokenizing
1 parent 1e7a033 commit dfa058e

File tree

2 files changed

+0
-3
lines changed

2 files changed

+0
-3
lines changed

examples/main/main.cpp

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -211,7 +211,6 @@ int main(int argc, char ** argv) {
211211
int guidance_offset = 0;
212212
int original_prompt_len = 0;
213213
if (ctx_guidance) {
214-
params.cfg_negative_prompt.insert(0, 1, ' ');
215214
guidance_inp = ::llama_tokenize(ctx_guidance, params.cfg_negative_prompt, add_bos);
216215

217216
std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, add_bos);

examples/server/server.cpp

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -286,7 +286,6 @@ struct llama_server_context
286286
std::vector<llama_token> p;
287287
if (first)
288288
{
289-
s.insert(0, 1, ' '); // add a space if it's the first
290289
p = ::llama_tokenize(ctx, s, add_bos);
291290
first = false;
292291
}
@@ -309,7 +308,6 @@ struct llama_server_context
309308
else
310309
{
311310
auto s = json_prompt.template get<std::string>();
312-
s.insert(0, 1, ' '); // always add a first space
313311
prompt_tokens = ::llama_tokenize(ctx, s, add_bos);
314312
}
315313

0 commit comments

Comments (0)