Skip to content

Commit 52bb63c

Browse files
authored
refactor: switch to emplace_back to avoid extra object (#5291)
1 parent 1ec3332 commit 52bb63c

File tree

8 files changed

+36
-36
lines changed

8 files changed

+36
-36
lines changed

common/common.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -515,7 +515,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
515515
invalid_param = true;
516516
break;
517517
}
518-
params.lora_adapter.push_back(std::make_tuple(argv[i], 1.0f));
518+
params.lora_adapter.emplace_back(argv[i], 1.0f);
519519
params.use_mmap = false;
520520
} else if (arg == "--lora-scaled") {
521521
if (++i >= argc) {
@@ -527,7 +527,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
527527
invalid_param = true;
528528
break;
529529
}
530-
params.lora_adapter.push_back(std::make_tuple(lora_adapter, std::stof(argv[i])));
530+
params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i]));
531531
params.use_mmap = false;
532532
} else if (arg == "--lora-base") {
533533
if (++i >= argc) {
@@ -664,7 +664,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
664664
invalid_param = true;
665665
break;
666666
}
667-
params.antiprompt.push_back(argv[i]);
667+
params.antiprompt.emplace_back(argv[i]);
668668
} else if (arg == "-ld" || arg == "--logdir") {
669669
if (++i >= argc) {
670670
invalid_param = true;
@@ -880,7 +880,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
880880
}
881881

882882
if (!params.kv_overrides.empty()) {
883-
params.kv_overrides.emplace_back(llama_model_kv_override());
883+
params.kv_overrides.emplace_back();
884884
params.kv_overrides.back().key[0] = 0;
885885
}
886886

examples/llama-bench/llama-bench.cpp

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -948,46 +948,46 @@ struct markdown_printer : public printer {
948948

949949
void print_header(const cmd_params & params) override {
950950
// select fields to print
951-
fields.push_back("model");
952-
fields.push_back("size");
953-
fields.push_back("params");
954-
fields.push_back("backend");
951+
fields.emplace_back("model");
952+
fields.emplace_back("size");
953+
fields.emplace_back("params");
954+
fields.emplace_back("backend");
955955
bool is_cpu_backend = test::get_backend() == "CPU" || test::get_backend() == "BLAS";
956956
if (!is_cpu_backend) {
957-
fields.push_back("n_gpu_layers");
957+
fields.emplace_back("n_gpu_layers");
958958
}
959959
if (params.n_threads.size() > 1 || params.n_threads != cmd_params_defaults.n_threads || is_cpu_backend) {
960-
fields.push_back("n_threads");
960+
fields.emplace_back("n_threads");
961961
}
962962
if (params.n_batch.size() > 1 || params.n_batch != cmd_params_defaults.n_batch) {
963-
fields.push_back("n_batch");
963+
fields.emplace_back("n_batch");
964964
}
965965
if (params.type_k.size() > 1 || params.type_k != cmd_params_defaults.type_k) {
966-
fields.push_back("type_k");
966+
fields.emplace_back("type_k");
967967
}
968968
if (params.type_v.size() > 1 || params.type_v != cmd_params_defaults.type_v) {
969-
fields.push_back("type_v");
969+
fields.emplace_back("type_v");
970970
}
971971
if (params.main_gpu.size() > 1 || params.main_gpu != cmd_params_defaults.main_gpu) {
972-
fields.push_back("main_gpu");
972+
fields.emplace_back("main_gpu");
973973
}
974974
if (params.split_mode.size() > 1 || params.split_mode != cmd_params_defaults.split_mode) {
975-
fields.push_back("split_mode");
975+
fields.emplace_back("split_mode");
976976
}
977977
if (params.mul_mat_q.size() > 1 || params.mul_mat_q != cmd_params_defaults.mul_mat_q) {
978-
fields.push_back("mul_mat_q");
978+
fields.emplace_back("mul_mat_q");
979979
}
980980
if (params.no_kv_offload.size() > 1 || params.no_kv_offload != cmd_params_defaults.no_kv_offload) {
981-
fields.push_back("no_kv_offload");
981+
fields.emplace_back("no_kv_offload");
982982
}
983983
if (params.tensor_split.size() > 1 || params.tensor_split != cmd_params_defaults.tensor_split) {
984-
fields.push_back("tensor_split");
984+
fields.emplace_back("tensor_split");
985985
}
986986
if (params.use_mmap.size() > 1 || params.use_mmap != cmd_params_defaults.use_mmap) {
987-
fields.push_back("use_mmap");
987+
fields.emplace_back("use_mmap");
988988
}
989-
fields.push_back("test");
990-
fields.push_back("t/s");
989+
fields.emplace_back("test");
990+
fields.emplace_back("t/s");
991991

992992
fprintf(fout, "|");
993993
for (const auto & field : fields) {

examples/main/main.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -352,12 +352,12 @@ int main(int argc, char ** argv) {
352352
// in instruct mode, we inject a prefix and a suffix to each input by the user
353353
if (params.instruct) {
354354
params.interactive_first = true;
355-
params.antiprompt.push_back("### Instruction:\n\n");
355+
params.antiprompt.emplace_back("### Instruction:\n\n");
356356
}
357357
// similar for chatml mode
358358
else if (params.chatml) {
359359
params.interactive_first = true;
360-
params.antiprompt.push_back("<|im_start|>user\n");
360+
params.antiprompt.emplace_back("<|im_start|>user\n");
361361
}
362362

363363
// enable interactive mode if interactive start is specified

examples/perplexity/perplexity.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -881,7 +881,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
881881
size_t li = hs_cur.common_prefix;
882882
for (int s = 0; s < 4; ++s) {
883883
for (size_t j = hs_cur.common_prefix; j < hs_cur.seq_tokens[s].size() - 1; j++) {
884-
eval_pairs.push_back(std::make_pair(hs_cur.i_batch + li++, hs_cur.seq_tokens[s][j + 1]));
884+
eval_pairs.emplace_back(hs_cur.i_batch + li++, hs_cur.seq_tokens[s][j + 1]);
885885
}
886886
++li;
887887
}
@@ -1159,13 +1159,13 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
11591159
const int last_1st = task.seq_tokens[0].size() - n_base1 > 1 ? 1 : 0;
11601160
size_t li = n_base1 - 1;
11611161
for (size_t j = n_base1-1; j < task.seq_tokens[0].size()-1-last_1st; ++j) {
1162-
eval_pairs.push_back(std::make_pair(task.i_batch + li++, task.seq_tokens[0][j+1]));
1162+
eval_pairs.emplace_back(task.i_batch + li++, task.seq_tokens[0][j+1]);
11631163
}
11641164
const auto& n_base2 = skip_choice ? task.n_base2 : task.common_prefix;
11651165
const int last_2nd = task.seq_tokens[1].size() - n_base2 > 1 ? 1 : 0;
11661166
li = task.seq_tokens[0].size() - task.common_prefix + n_base2 - 1;
11671167
for (size_t j = n_base2-1; j < task.seq_tokens[1].size()-1-last_2nd; ++j) {
1168-
eval_pairs.push_back(std::make_pair(task.i_batch + li++, task.seq_tokens[1][j+1]));
1168+
eval_pairs.emplace_back(task.i_batch + li++, task.seq_tokens[1][j+1]);
11691169
}
11701170
}
11711171
compute_logprobs(batch_logits.data(), n_vocab, workers, eval_pairs, eval_results);
@@ -1524,7 +1524,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
15241524
size_t li = cur_task.common_prefix;
15251525
for (int s = 0; s < int(cur_task.seq_tokens.size()); ++s) {
15261526
for (size_t j = cur_task.common_prefix; j < cur_task.seq_tokens[s].size() - 1; j++) {
1527-
eval_pairs.push_back(std::make_pair(cur_task.i_batch + li++, cur_task.seq_tokens[s][j + 1]));
1527+
eval_pairs.emplace_back(cur_task.i_batch + li++, cur_task.seq_tokens[s][j + 1]);
15281528
}
15291529
++li;
15301530
}

examples/quantize-stats/quantize-stats.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -257,13 +257,13 @@ int main(int argc, char ** argv) {
257257
invalid_param = true;
258258
break;
259259
}
260-
params.include_layers.push_back(argv[i]);
260+
params.include_layers.emplace_back(argv[i]);
261261
} else if (arg == "-L" || arg == "--exclude-layer") {
262262
if (++i >= argc) {
263263
invalid_param = true;
264264
break;
265265
}
266-
params.exclude_layers.push_back(argv[i]);
266+
params.exclude_layers.emplace_back(argv[i]);
267267
} else if (arg == "-t" || arg == "--type") {
268268
if (++i >= argc) {
269269
invalid_param = true;

examples/quantize/quantize.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -208,13 +208,13 @@ int main(int argc, char ** argv) {
208208
}
209209
} else if (strcmp(argv[arg_idx], "--include-weights") == 0) {
210210
if (arg_idx < argc-1) {
211-
included_weights.push_back(argv[++arg_idx]);
211+
included_weights.emplace_back(argv[++arg_idx]);
212212
} else {
213213
usage(argv[0]);
214214
}
215215
} else if (strcmp(argv[arg_idx], "--exclude-weights") == 0) {
216216
if (arg_idx < argc-1) {
217-
excluded_weights.push_back(argv[++arg_idx]);
217+
excluded_weights.emplace_back(argv[++arg_idx]);
218218
} else {
219219
usage(argv[0]);
220220
}

examples/server/server.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1884,7 +1884,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
18841884
invalid_param = true;
18851885
break;
18861886
}
1887-
sparams.api_keys.push_back(argv[i]);
1887+
sparams.api_keys.emplace_back(argv[i]);
18881888
}
18891889
else if (arg == "--api-key-file")
18901890
{
@@ -2160,7 +2160,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
21602160
invalid_param = true;
21612161
break;
21622162
}
2163-
params.lora_adapter.push_back(std::make_tuple(argv[i], 1.0f));
2163+
params.lora_adapter.emplace_back(argv[i], 1.0f);
21642164
params.use_mmap = false;
21652165
}
21662166
else if (arg == "--lora-scaled")
@@ -2176,7 +2176,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
21762176
invalid_param = true;
21772177
break;
21782178
}
2179-
params.lora_adapter.push_back(std::make_tuple(lora_adapter, std::stof(argv[i])));
2179+
params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i]));
21802180
params.use_mmap = false;
21812181
}
21822182
else if (arg == "--lora-base")
@@ -2318,7 +2318,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
23182318
}
23192319
}
23202320
if (!params.kv_overrides.empty()) {
2321-
params.kv_overrides.emplace_back(llama_model_kv_override());
2321+
params.kv_overrides.emplace_back();
23222322
params.kv_overrides.back().key[0] = 0;
23232323
}
23242324

tests/test-llama-grammar.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ int main()
105105

106106
for (auto rule : expected_rules)
107107
{
108-
parsed_grammar.rules.push_back({});
108+
parsed_grammar.rules.emplace_back();
109109
for (auto element : rule)
110110
{
111111
parsed_grammar.rules.back().push_back(element);

0 commit comments

Comments (0)