1 parent 0463028 commit 3989b29
examples/talk-llama/talk-llama.cpp
@@ -266,6 +266,9 @@ int main(int argc, char ** argv) {
     llama_backend_init(true);

     auto lmparams = llama_model_default_params();
+    if (!params.use_gpu) {
+        lmparams.n_gpu_layers = 0;
+    }

     struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
@@ -276,9 +279,6 @@ int main(int argc, char ** argv) {
     lcparams.seed      = 1;
     lcparams.f16_kv    = true;
     lcparams.n_threads = params.n_threads;
-    if (!params.use_gpu) {
-        lcparams.n_gpu_layers = 0;
-    }

     struct llama_context * ctx_llama = llama_new_context_with_model(model_llama, lcparams);
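
Taken together, the two hunks move the !params.use_gpu override of n_gpu_layers from the context parameters (lcparams) to the model parameters (lmparams), matching llama.cpp's split where GPU offloading is decided at model-load time rather than at context creation. Below is a minimal C++ sketch of the resulting initialization order, assuming the llama.cpp API of this period; the use_gpu flag, thread count, and model path are hypothetical stand-ins for talk-llama's params struct, not the full talk-llama source.

// Minimal sketch (assumes the llama.cpp API of this period; the model
// path and use_gpu flag are hypothetical stand-ins for talk-llama's params).
#include "llama.h"

int main() {
    llama_backend_init(true);

    // n_gpu_layers now lives in the model params: offloading is decided
    // when the model is loaded, not when the context is created.
    llama_model_params lmparams = llama_model_default_params();
    const bool use_gpu = false;        // hypothetical flag
    if (!use_gpu) {
        lmparams.n_gpu_layers = 0;     // keep every layer on the CPU
    }

    struct llama_model * model = llama_load_model_from_file("model.gguf", lmparams);

    // The context params keep the runtime settings, as in the second hunk.
    llama_context_params lcparams = llama_context_default_params();
    lcparams.seed      = 1;
    lcparams.f16_kv    = true;
    lcparams.n_threads = 4;            // hypothetical thread count

    struct llama_context * ctx = llama_new_context_with_model(model, lcparams);

    // ... run inference ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}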