
Commit 87d6f8c

restore original implementation with new names

1 parent: 7b4a327

3 files changed (+13, −23)


examples/common.cpp (+3, −12)
@@ -128,12 +128,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.path_prompt_cache = argv[i];
-        } else if (arg == "--session") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.path_session = argv[i];
+        } else if (arg == "--prompt-cache-all") {
+            params.prompt_cache_save_all = true;
         } else if (arg == "-f" || arg == "--file") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -350,11 +346,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
         gpt_print_usage(argc, argv, default_params);
         exit(1);
     }
-    if (!params.path_session.empty() && !params.path_prompt_cache.empty()) {
-        fprintf(stderr, "error: only one of --prompt-cache or --session may be specified\n");
-        gpt_print_usage(argc, argv, default_params);
-        exit(1);
-    }
     if (escape_prompt) {
         process_escapes(params.prompt);
     }
@@ -380,7 +371,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "                        prompt to start generation with (default: empty)\n");
     fprintf(stderr, "  -e                    process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
     fprintf(stderr, "  --prompt-cache FNAME  file to cache prompt state for faster startup (default: none)\n");
-    fprintf(stderr, "  --session FNAME       file to store prompt and generations, allowing continuation (default: none)\n");
+    fprintf(stderr, "  --prompt-cache-all    if specified, saves user input and generations to cache as well\n");
     fprintf(stderr, "  --random-prompt       start with a randomized prompt.\n");
     fprintf(stderr, "  --in-prefix STRING    string to prefix user inputs with (default: empty)\n");
     fprintf(stderr, "  --in-suffix STRING    string to suffix after user inputs with (default: empty)\n");
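Side note on the two option shapes in this file: `--prompt-cache FNAME` consumes a following filename argument, while the new `--prompt-cache-all` is a bare boolean toggle that reuses the path already given to `--prompt-cache`. Below is a minimal, self-contained sketch of that parsing pattern; the loop and error handling are simplified and illustrative, not the project's parser — only the option names come from the diff.

// Standalone sketch of the two option shapes seen in the hunks above.
#include <cstdio>
#include <string>

int main(int argc, char ** argv) {
    std::string path_prompt_cache;        // set by --prompt-cache FNAME
    bool prompt_cache_save_all = false;   // set by --prompt-cache-all

    for (int i = 1; i < argc; i++) {
        const std::string arg = argv[i];
        if (arg == "--prompt-cache") {
            if (++i >= argc) {            // value-taking flag: the next argv must exist
                fprintf(stderr, "error: missing FNAME after --prompt-cache\n");
                return 1;
            }
            path_prompt_cache = argv[i];
        } else if (arg == "--prompt-cache-all") {
            prompt_cache_save_all = true; // bare flag: nothing to consume
        }
    }

    printf("cache path: '%s', save all: %s\n",
           path_prompt_cache.c_str(), prompt_cache_save_all ? "true" : "false");
    return 0;
}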

examples/common.h (+5, −5)
@@ -42,18 +42,18 @@ struct gpt_params {
     std::string model  = "models/lamma-7B/ggml-model.bin"; // model path
     std::string prompt = "";
     std::string path_prompt_cache = "";  // path to file for saving/loading prompt eval state
-    std::string path_session      = "";  // file for saving/loading prompt and generations
     std::string input_prefix = "";  // string to prefix user inputs with
     std::string input_suffix = "";  // string to suffix user inputs with
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted

     std::string lora_adapter = ""; // lora adapter path
     std::string lora_base    = ""; // base model path for the lora adapter

-    bool memory_f16    = true;  // use f16 instead of f32 for memory kv
-    bool random_prompt = false; // do not randomize prompt if none provided
-    bool use_color     = false; // use color to distinguish generations and inputs
-    bool interactive   = false; // interactive mode
+    bool memory_f16            = true;  // use f16 instead of f32 for memory kv
+    bool random_prompt         = false; // do not randomize prompt if none provided
+    bool use_color             = false; // use color to distinguish generations and inputs
+    bool interactive           = false; // interactive mode
+    bool prompt_cache_save_all = false; // save user input and generations to prompt cache

     bool embedding         = false; // get only sentence embedding
     bool interactive_first = false; // wait for user input immediately
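What the header change amounts to: the single `path_session` field is split into a path plus an opt-in flag. A hedged sketch of the correspondence follows; the excerpt struct is illustrative, not the full gpt_params.

// Illustrative excerpt: how the removed path_session maps onto the two
// surviving fields. Names follow the diff; the struct is not the real one.
#include <iostream>
#include <string>

struct params_excerpt {
    std::string path_prompt_cache = "";    // saving/loading prompt eval state
    bool prompt_cache_save_all    = false; // also persist user input and generations
};

int main() {
    // Old behavior: params.path_session = "state.bin";
    // New equivalent: name the cache file and opt in to saving everything.
    params_excerpt params;
    params.path_prompt_cache     = "state.bin";
    params.prompt_cache_save_all = true;

    std::cout << "cache: " << params.path_prompt_cache
              << ", save all: " << std::boolalpha
              << params.prompt_cache_save_all << "\n";
    return 0;
}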

examples/main/main.cpp (+5, −6)
@@ -140,10 +140,9 @@ int main(int argc, char ** argv) {
     // Add a space in front of the first character to match OG llama tokenizer behavior
     params.prompt.insert(0, 1, ' ');

-    std::string path_session =
-        !params.path_session.empty() ? params.path_session : params.path_prompt_cache;
+    std::string path_session    = params.path_prompt_cache;
+    const bool session_save_all = params.prompt_cache_save_all;
     std::vector<llama_token> session_tokens;
-    bool resume_session = !params.path_session.empty();

     if (!path_session.empty()) {
         fprintf(stderr, "%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
@@ -316,8 +315,8 @@ int main(int argc, char ** argv) {
             // insert n_left/2 tokens at the start of embd from last_n_tokens
             embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size());

-            // stop saving session if we run out of context, saving whatever was evaled
-            if (!path_session.empty() && resume_session) {
+            // stop saving session if we run out of context
+            if (!path_session.empty() && session_save_all) {
                 llama_save_session_file(ctx, path_session.c_str(),
                     session_tokens.data(), session_tokens.size());
             }
@@ -621,7 +620,7 @@ int main(int argc, char ** argv) {
         }
     }

-    if (!path_session.empty() && resume_session) {
+    if (!path_session.empty() && session_save_all) {
         fprintf(stderr, "\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
         llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
     }
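Net effect in main.cpp: both save sites now gate on the same predicate under the new names. A small sketch of that condition follows; it is an assumed simplification — the real code calls llama_save_session_file inline rather than through a helper.

// Sketch of the gate used at both save sites above: within this diff, a
// session file is written only when a cache path is set AND
// --prompt-cache-all was given.
#include <cassert>
#include <string>

static bool should_save_session(const std::string & path_session, bool session_save_all) {
    return !path_session.empty() && session_save_all;
}

int main() {
    assert(!should_save_session("", true));           // no cache path: these sites never save
    assert(!should_save_session("state.bin", false)); // path alone: these sites skip saving
    assert( should_save_session("state.bin", true));  // both set: generations keep being saved
    return 0;
}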
