
Commit fbdfefe

llama : gemma3 : use output tensor if it exists in model weight (#12506)
* llama : gemma3 : use output tensor if it exists in model weight
* also add to the llm_tensor_names
1 parent ba932df commit fbdfefe

3 files changed: 8 additions, 1 deletion


gguf-py/gguf/constants.py

Lines changed: 1 addition & 0 deletions

@@ -1113,6 +1113,7 @@ class MODEL_TENSOR(IntEnum):
     ],
     MODEL_ARCH.GEMMA3: [
         MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_Q_NORM,

src/llama-arch.cpp

Lines changed: 1 addition & 0 deletions

@@ -778,6 +778,7 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
         {
             { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT,      "output" },
             { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
             { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },

src/llama-model.cpp

Lines changed: 6 additions & 1 deletion

@@ -2571,7 +2571,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
             // output
             output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-            output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
+            output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+            // if output is NULL, init from the input tok embed
+            if (output == NULL) {
+                output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+            }
 
             for (int i = 0; i < n_layer; ++i) {
                 auto & layer = layers[i];
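The llama-model.cpp hunk above amounts to a simple fallback: prefer a dedicated "output" tensor when the GGUF file ships one (it is marked optional, so its absence is not an error), and otherwise tie the output head to the token embeddings as before. Below is a minimal standalone C++ sketch of that pattern, not llama.cpp code: the Tensor struct, the weight map, and the find_tensor helper are hypothetical stand-ins for illustration only.

// Standalone sketch of the "optional output tensor with fallback to tied
// embeddings" pattern introduced by this commit. All names here are
// illustrative; they are not llama.cpp APIs.
#include <cstdio>
#include <map>
#include <string>

struct Tensor { std::string name; };

// Return the tensor with the given name, or nullptr if the model lacks it
// (mirrors loading with TENSOR_NOT_REQUIRED in the real code).
static const Tensor * find_tensor(const std::map<std::string, Tensor> & weights,
                                  const std::string & name) {
    auto it = weights.find(name);
    return it == weights.end() ? nullptr : &it->second;
}

int main() {
    // Example: a Gemma 3 checkpoint converted without a separate output head.
    std::map<std::string, Tensor> weights = {
        { "token_embd.weight",  { "token_embd.weight" } },
        { "output_norm.weight", { "output_norm.weight" } },
    };

    // Prefer the dedicated output tensor if the file provides one.
    const Tensor * output = find_tensor(weights, "output.weight");

    // Fall back to the input embeddings (tied weights) when it is absent,
    // mirroring the TENSOR_DUPLICATED branch in the diff above.
    if (output == nullptr) {
        output = find_tensor(weights, "token_embd.weight");
    }

    std::printf("output head resolved to: %s\n", output->name.c_str());
    return 0;
}

With a Gemma 3 GGUF that contains a separate output tensor, the first lookup succeeds; older tied-weight conversions simply take the fallback branch, so both kinds of file keep loading.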
