
Commit 9dede37

llama : remove unused vars (#4796)
1 parent 3c36213

File tree

1 file changed: 0 additions, 2 deletions

llama.cpp

Lines changed: 0 additions & 2 deletions
@@ -4997,7 +4997,6 @@ struct llm_build_context {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

         const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

         const int64_t n_rot = n_embd_head_k / 2;
@@ -5210,7 +5209,6 @@ struct llm_build_context {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

         const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

         struct ggml_tensor * cur;
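
Note on the change (not part of the commit itself): in both hunks the local n_embd_gqa is declared but never read afterwards, so it only produces unused-variable warnings under -Wall. The snippet below is a minimal, hypothetical C++ sketch of that pattern; hparams_sketch and its field values are invented for illustration, assuming n_embd_v_gqa() amounts to the value-head size times the number of KV heads.

// Hypothetical stand-in for llama.cpp's hparams; not taken from the repository.
#include <cstdint>

struct hparams_sketch {
    int64_t n_embd_head_v = 128;  // made-up per-head embedding size
    int64_t n_head_kv     = 8;    // made-up number of KV heads
    int64_t n_embd_v_gqa() const { return n_embd_head_v * n_head_kv; }
};

int main() {
    hparams_sketch hparams;
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa(); // never read below -> -Wunused-variable
    return n_embd_head > 0 ? 0 : 1;                      // only n_embd_head is actually used
}

Deleting the unused declaration, as this commit does for both build functions, silences that warning without changing behavior.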

0 commit comments
