1 parent a6956b2 commit 41318d7
llama.cpp
@@ -856,7 +856,7 @@ static bool llama_eval_internal(
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
     ggml_cgraph gf = {};
-    gf.n_threads = N > 255 && ggml_cpu_has_blas() ? 1 : n_threads;
+    gf.n_threads = N >= 32 && ggml_cpu_has_blas() ? 1 : n_threads;

     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, tokens, N*ggml_element_size(embd));
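Taken on its own, the changed line is a small dispatch heuristic: once the prompt batch holds enough tokens and a BLAS backend is compiled in, graph evaluation drops to a single thread, because BLAS runs its own internal parallelism and any extra ggml threads would only spin-wait on it. The commit lowers the threshold from N > 255 to N >= 32, so BLAS takes over for much smaller prompt batches. Below is a minimal standalone sketch of that heuristic; pick_n_threads and has_blas are illustrative names, not llama.cpp APIs (the real check is ggml_cpu_has_blas()).

#include <cstdio>

// Illustrative stand-in for ggml_cpu_has_blas(), which in llama.cpp reports
// whether ggml was built with a BLAS backend.
static bool has_blas() { return true; }

// Sketch of the patched line's heuristic: from 32 tokens upward the large
// matrix multiplications are routed to BLAS, which parallelizes internally,
// so the graph itself runs on a single thread; smaller batches keep the
// user-configured thread count.
static int pick_n_threads(int n_tokens, int n_threads) {
    return (n_tokens >= 32 && has_blas()) ? 1 : n_threads;
}

int main() {
    printf("8-token batch   -> %d threads\n", pick_n_threads(8,   4)); // 4: too small for BLAS
    printf("512-token batch -> %d threads\n", pick_n_threads(512, 4)); // 1: BLAS handles parallelism
    return 0;
}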