
Commit 17a5e9f

Author: Joan Martinez (committed)
fix: fix the usage of the code model
1 parent 21936dd commit 17a5e9f

File tree

2 files changed: +4 -2 lines changed


convert-hf-to-gguf.py

Lines changed: 2 additions & 2 deletions
@@ -2442,10 +2442,10 @@ def get_tensors(self):
             if 'gated_layer' in name:
                 d1 = data[:self.intermediate_size, :]
                 name1 = name.replace('gated_layers', 'gated_layers_w')
-                name1 = name.replace('up_gated_layer', 'gated_layers_w')
+                name1 = name.replace('up_gated_layer', 'gated_layers_v')
                 d2 = data[self.intermediate_size:, :]
                 name2 = name.replace('gated_layers', 'gated_layers_v')
-                name2 = name.replace('up_gated_layer', 'gated_layers_v')
+                name2 = name.replace('up_gated_layer', 'gated_layers_w')
                 yield name1, d1
                 yield name2, d2
                 continue
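
For context, a minimal sketch (not the converter itself) of the mapping after this change: when a fused 'up_gated_layer' weight is split, the first half of the tensor now goes to a 'gated_layers_v' name and the second half to 'gated_layers_w', the reverse of the old mapping. The tensor name, sizes, and use of numpy below are purely illustrative.

import numpy as np

intermediate_size = 4                                    # illustrative value
name = "encoder.layer.0.mlp.up_gated_layer.weight"       # hypothetical tensor name
data = np.zeros((2 * intermediate_size, 8))              # fused [2 * intermediate, hidden] weight

# mapping after this commit: first half -> *_v, second half -> *_w
d1 = data[:intermediate_size, :]
name1 = name.replace('up_gated_layer', 'gated_layers_v')
d2 = data[intermediate_size:, :]
name2 = name.replace('up_gated_layer', 'gated_layers_w')

print(name1, d1.shape)   # ...gated_layers_v.weight (4, 8)
print(name2, d2.shape)   # ...gated_layers_w.weight (4, 8)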

llama.cpp

Lines changed: 2 additions & 0 deletions
@@ -8506,6 +8506,8 @@ struct llm_build_context {
                 cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);
 
                 if (model.layers[il].attn_norm_2 != nullptr) {
+                    // re-add the layer input
+                    cur = ggml_add(ctx0, cur, inpL);
                     cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, cb, il);
                 }
 
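For the llama.cpp change, a rough plain-Python sketch (not llama.cpp code; the function and argument names are made up) of where the re-added residual sits: for layers that carry a second attention norm (attn_norm_2), the layer input is now added back onto the normalized attention output before that second norm is applied.

def post_attention(cur, inp_layer, attn_out_norm, attn_norm_2=None):
    # first normalization of the attention output, as in the surrounding code
    cur = attn_out_norm(cur)
    if attn_norm_2 is not None:
        # the fix: re-add the layer input before the second normalization
        cur = cur + inp_layer
        cur = attn_norm_2(cur)
    return cur

# usage with identity norms, just to show the shape of the call:
# post_attention(1.0, 2.0, lambda t: t, attn_norm_2=lambda t: t)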
