Commit 2dff6f4

log : simplify init
1 parent c426837 commit 2dff6f4

File tree

24 files changed: +43, -115 lines

common/common.cpp

Lines changed: 11 additions & 0 deletions

@@ -362,6 +362,17 @@ bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREAD
     return true;
 }
 
+void gpt_init() {
+    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
+        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
+            gpt_log_add(gpt_log_main(), level, "%s", text);
+        }
+    }, NULL);
+
+
+    LOG_INF("build: %d (%s) with %s for %s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, LLAMA_BUILD_TARGET);
+}
+
 std::string gpt_params_get_system_info(const gpt_params & params) {
     std::ostringstream os;
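
Note: the callback signature used here, (ggml_log_level level, const char * text, void * user_data), is the general llama_log_set contract, so the same hook can be installed without going through common. A minimal standalone sketch, assuming only llama.h; the function name and the warning-level filter are illustrative choices, not part of this commit:

    #include "llama.h"

    #include <cstdio>

    // Hypothetical standalone log hook: forward only warnings and errors to stderr.
    static void example_log_callback(ggml_log_level level, const char * text, void * /*user_data*/) {
        if (level == GGML_LOG_LEVEL_WARN || level == GGML_LOG_LEVEL_ERROR) {
            fputs(text, stderr);
        }
    }

    int main() {
        llama_log_set(example_log_callback, NULL);
        // ... backend init, model loading, etc. now log through the hook
        return 0;
    }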

common/common.h

Lines changed: 3 additions & 0 deletions

@@ -339,6 +339,9 @@ struct gpt_params {
     bool batched_bench_output_jsonl = false;
 };
 
+// call once at the start of a program using common
+void gpt_init();
+
 std::string gpt_params_get_system_info(const gpt_params & params);
 
 bool parse_cpu_range(const std::string& range, bool(&boolmask)[GGML_MAX_N_THREADS]);
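
With the declaration in place, a program built on common now starts as in the sketch below, mirroring the call sites updated in the example diffs that follow (argument parsing is elided since it varies per example):

    #include "common.h"

    int main(int argc, char ** argv) {
        gpt_init(); // installs the llama.cpp log handler and prints build info

        gpt_params params;
        // ... parse argc/argv into params and proceed as before
        return 0;
    }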

common/log.cpp

Lines changed: 1 addition & 1 deletion

@@ -67,7 +67,7 @@ struct gpt_log_entry {
         if (level != GGML_LOG_LEVEL_NONE) {
             if (timestamp) {
                 // [M.s.ms.us]
-                fprintf(fcur, "" LOG_COL_BLUE "%05d.%02d.%03d.%03d" LOG_COL_DEFAULT " ",
+                fprintf(fcur, "" LOG_COL_BLUE "%d.%02d.%03d.%03d" LOG_COL_DEFAULT " ",
                     (int) (timestamp / 1000000 / 60),
                     (int) (timestamp / 1000000 % 60),
                     (int) (timestamp / 1000 % 1000),
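
The only output change in this hunk is the minutes field of the timestamp: %05d zero-padded it to five digits, while %d prints it at natural width. A small sketch of both formats on an assumed timestamp of 185250000 µs (3 min 5.250 s); the final %03d argument (microseconds) is assumed from the [M.s.ms.us] comment, since the hunk cuts off before it:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t timestamp = 185250000; // microseconds, chosen for illustration

        // old: "00003.05.250.000" -- minutes zero-padded to five digits
        printf("%05d.%02d.%03d.%03d\n",
               (int) (timestamp / 1000000 / 60),  // minutes
               (int) (timestamp / 1000000 % 60),  // seconds
               (int) (timestamp / 1000 % 1000),   // milliseconds
               (int) (timestamp % 1000));         // microseconds (assumed term)

        // new: "3.05.250.000" -- minutes at natural width
        printf("%d.%02d.%03d.%03d\n",
               (int) (timestamp / 1000000 / 60),
               (int) (timestamp / 1000000 % 60),
               (int) (timestamp / 1000 % 1000),
               (int) (timestamp % 1000));
        return 0;
    }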

examples/batched-bench/batched-bench.cpp

Lines changed: 1 addition & 5 deletions

@@ -15,11 +15,7 @@ static void print_usage(int, char ** argv) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;
examples/batched/batched.cpp

Lines changed: 1 addition & 5 deletions

@@ -15,11 +15,7 @@ static void print_usage(int, char ** argv) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;
examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp

Lines changed: 2 additions & 5 deletions

@@ -872,16 +872,13 @@ static std::string basename(const std::string &path) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     struct train_params params = get_default_train_params();
     if (!params_parse(argc, argv, &params)) {
         return 1;
     }
+
     Config config;
     TransformerWeights weights = {};
     {

examples/embedding/embedding.cpp

Lines changed: 1 addition & 7 deletions

@@ -79,11 +79,7 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

@@ -95,8 +91,6 @@ int main(int argc, char ** argv) {
     // For non-causal models, batch size must be equal to ubatch size
     params.n_ubatch = params.n_batch;
 
-    print_build_info();
-
     llama_backend_init();
     llama_numa_init(params.numa);

examples/eval-callback/eval-callback.cpp

Lines changed: 1 addition & 7 deletions

@@ -140,11 +140,7 @@ static bool run(llama_context * ctx, const gpt_params & params) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     callback_data cb_data;

@@ -154,8 +150,6 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    print_build_info();
-
     llama_backend_init();
     llama_numa_init(params.numa);

examples/imatrix/imatrix.cpp

Lines changed: 2 additions & 6 deletions

@@ -543,7 +543,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
 
     if (params.compute_ppl) {
         const int first = n_ctx/2;
-        const auto all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
+        const auto * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
         process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
                        workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
         count += n_ctx - first - 1;

@@ -573,11 +573,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;
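
The first imatrix hunk is a type-deduction fix rather than a behavior change: with a float * initializer, const auto deduces float * const (the pointer is const, the floats are not), whereas const auto * deduces const float * (read-only access through the pointer, matching the intent of all_logits). A minimal sketch with hypothetical names:

    #include <vector>

    int main() {
        std::vector<float> logits = {0.1f, 0.2f};
        float * raw = logits.data();

        const auto   p1 = raw; // float * const: p1 can't be reseated, floats writable
        const auto * p2 = raw; // const float *: floats read-only through p2

        p1[0] = 1.0f;          // compiles -- pointee is mutable through p1
        // p2[0] = 1.0f;       // error: assignment through const float *
        // p1 = nullptr;       // error: p1 itself is const

        (void) p2;
        return 0;
    }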

examples/infill/infill.cpp

Lines changed: 1 addition & 7 deletions

@@ -104,11 +104,7 @@ static void sigint_handler(int signo) {
 #endif
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;
     g_params = &params;

@@ -159,8 +155,6 @@ int main(int argc, char ** argv) {
         LOG_WRN("%s: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
     }
 
-    print_build_info();
-
     LOG_INF("%s: llama backend init\n", __func__);
     llama_backend_init();
     llama_numa_init(params.numa);
