Skip to content

Commit cc79499

Browse files
ggerganov authored and dsx1986 committed
log : add CONT level for continuing previous log entry (ggml-org#9610)
1 parent d016542 commit cc79499

File tree

7 files changed

+29
-24
lines changed

7 files changed

+29
-24
lines changed

common/log.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ struct gpt_log_entry {
8282
}
8383
}
8484

85-
if (level != GGML_LOG_LEVEL_NONE && prefix) {
85+
if (level != GGML_LOG_LEVEL_NONE && level != GGML_LOG_LEVEL_CONT && prefix) {
8686
if (timestamp) {
8787
// [M.s.ms.us]
8888
fprintf(fcur, "%s%d.%02d.%03d.%03d%s ",

common/log.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,8 +83,10 @@ void gpt_log_set_timestamps(struct gpt_log * log, bool timestamps); // w
8383
#define LOG_WRN(...) LOG_TMPL(GGML_LOG_LEVEL_WARN, 0, __VA_ARGS__)
8484
#define LOG_ERR(...) LOG_TMPL(GGML_LOG_LEVEL_ERROR, 0, __VA_ARGS__)
8585
#define LOG_DBG(...) LOG_TMPL(GGML_LOG_LEVEL_DEBUG, LOG_DEFAULT_DEBUG, __VA_ARGS__)
86+
#define LOG_CNT(...) LOG_TMPL(GGML_LOG_LEVEL_CONT, 0, __VA_ARGS__)
8687

8788
#define LOG_INFV(verbosity, ...) LOG_TMPL(GGML_LOG_LEVEL_INFO, verbosity, __VA_ARGS__)
8889
#define LOG_WRNV(verbosity, ...) LOG_TMPL(GGML_LOG_LEVEL_WARN, verbosity, __VA_ARGS__)
8990
#define LOG_ERRV(verbosity, ...) LOG_TMPL(GGML_LOG_LEVEL_ERROR, verbosity, __VA_ARGS__)
9091
#define LOG_DBGV(verbosity, ...) LOG_TMPL(GGML_LOG_LEVEL_DEBUG, verbosity, __VA_ARGS__)
92+
#define LOG_CNTV(verbosity, ...) LOG_TMPL(GGML_LOG_LEVEL_CONT, verbosity, __VA_ARGS__)

examples/infill/infill.cpp

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -263,9 +263,9 @@ int main(int argc, char ** argv) {
263263
if (params.n_keep > 0) {
264264
LOG_INF("%s: static prompt based on n_keep: '", __func__);
265265
for (int i = 0; i < params.n_keep; i++) {
266-
LOG("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
266+
LOG_CNT("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
267267
}
268-
LOG("'\n");
268+
LOG_CNT("'\n");
269269
}
270270
LOG_INF("\n");
271271
}
@@ -306,8 +306,8 @@ int main(int argc, char ** argv) {
306306

307307
LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
308308

309-
LOG("\n");
310-
LOG("\n##### Infill mode #####\n\n");
309+
LOG_INF("\n");
310+
LOG_INF("\n##### Infill mode #####\n\n");
311311
if (params.interactive) {
312312
const char *control_message;
313313
if (params.multiline_input) {
@@ -318,11 +318,11 @@ int main(int argc, char ** argv) {
318318
" - To return control without starting a new line, end your input with '/'.\n"
319319
" - If you want to submit another line, end your input with '\\'.\n";
320320
}
321-
LOG("== Running in interactive mode. ==\n");
321+
LOG_INF("== Running in interactive mode. ==\n");
322322
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
323-
LOG( " - Press Ctrl+C to interject at any time.\n");
323+
LOG_INF( " - Press Ctrl+C to interject at any time.\n");
324324
#endif
325-
LOG( "%s\n", control_message);
325+
LOG_INF( "%s\n", control_message);
326326

327327
is_interacting = params.interactive_first;
328328
}

examples/main/main.cpp

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -385,9 +385,9 @@ int main(int argc, char ** argv) {
385385
if (params.n_keep > add_bos) {
386386
LOG_INF("%s: static prompt based on n_keep: '", __func__);
387387
for (int i = 0; i < params.n_keep; i++) {
388-
LOG("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
388+
LOG_CNT("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
389389
}
390-
LOG("'\n");
390+
LOG_CNT("'\n");
391391
}
392392
LOG_INF("\n");
393393
}
@@ -409,40 +409,40 @@ int main(int argc, char ** argv) {
409409
}
410410

411411
if (params.interactive) {
412-
LOG("%s: interactive mode on.\n", __func__);
412+
LOG_INF("%s: interactive mode on.\n", __func__);
413413

414414
if (!params.antiprompt.empty()) {
415415
for (const auto & antiprompt : params.antiprompt) {
416-
LOG("Reverse prompt: '%s'\n", antiprompt.c_str());
416+
LOG_INF("Reverse prompt: '%s'\n", antiprompt.c_str());
417417
if (params.verbose_prompt) {
418418
auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
419419
for (int i = 0; i < (int) tmp.size(); i++) {
420-
LOG("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
420+
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
421421
}
422422
}
423423
}
424424
}
425425

426426
if (params.input_prefix_bos) {
427-
LOG("Input prefix with BOS\n");
427+
LOG_INF("Input prefix with BOS\n");
428428
}
429429

430430
if (!params.input_prefix.empty()) {
431-
LOG("Input prefix: '%s'\n", params.input_prefix.c_str());
431+
LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
432432
if (params.verbose_prompt) {
433433
auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
434434
for (int i = 0; i < (int) tmp.size(); i++) {
435-
LOG("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
435+
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
436436
}
437437
}
438438
}
439439

440440
if (!params.input_suffix.empty()) {
441-
LOG("Input suffix: '%s'\n", params.input_suffix.c_str());
441+
LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
442442
if (params.verbose_prompt) {
443443
auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
444444
for (int i = 0; i < (int) tmp.size(); i++) {
445-
LOG("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
445+
LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
446446
}
447447
}
448448
}
@@ -474,7 +474,7 @@ int main(int argc, char ** argv) {
474474
//GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
475475
LOG_INF("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
476476
}
477-
LOG("\n");
477+
LOG_INF("\n");
478478

479479
if (params.interactive) {
480480
const char * control_message;
@@ -486,11 +486,11 @@ int main(int argc, char ** argv) {
486486
" - To return control without starting a new line, end your input with '/'.\n"
487487
" - If you want to submit another line, end your input with '\\'.\n";
488488
}
489-
LOG("== Running in interactive mode. ==\n");
489+
LOG_INF("== Running in interactive mode. ==\n");
490490
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
491-
LOG( " - Press Ctrl+C to interject at any time.\n");
491+
LOG_INF( " - Press Ctrl+C to interject at any time.\n");
492492
#endif
493-
LOG( "%s\n", control_message);
493+
LOG_INF( "%s\n", control_message);
494494

495495
is_interacting = params.interactive_first;
496496
}

ggml/include/ggml.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -570,6 +570,7 @@ extern "C" {
570570
GGML_LOG_LEVEL_WARN = 2,
571571
GGML_LOG_LEVEL_ERROR = 3,
572572
GGML_LOG_LEVEL_DEBUG = 4,
573+
GGML_LOG_LEVEL_CONT = 5, // continue previous log
573574
};
574575

575576
// this tensor...

src/llama-impl.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,8 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void *
2828
#define LLAMA_LOG_INFO(...) llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
2929
#define LLAMA_LOG_WARN(...) llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
3030
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
31+
#define LLAMA_LOG_DEBUG(...) llama_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
32+
#define LLAMA_LOG_CONT(...) llama_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__)
3133

3234
//
3335
// helpers

src/llama.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18671,9 +18671,9 @@ struct llama_model * llama_load_model_from_file(
1867118671
unsigned percentage = (unsigned) (100 * progress);
1867218672
while (percentage > *cur_percentage_p) {
1867318673
*cur_percentage_p = percentage;
18674-
LLAMA_LOG(".");
18674+
LLAMA_LOG_CONT(".");
1867518675
if (percentage >= 100) {
18676-
LLAMA_LOG("\n");
18676+
LLAMA_LOG_CONT("\n");
1867718677
}
1867818678
}
1867918679
return true;

0 commit comments

Comments (0)