
Commit dbadcdd

harmonize formatting of tensor type conditions
1 parent: ce86019

1 file changed: 3 additions & 6 deletions


src/llama.cpp

Lines changed: 3 additions & 6 deletions
@@ -16275,8 +16275,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
         }
         ++qs.i_attention_wo;
-    }
-    else if (name.find("attn_qkv.weight") != std::string::npos) {
+    } else if (name.find("attn_qkv.weight") != std::string::npos) {
         if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q2_K_L) {
             new_type = GGML_TYPE_Q3_K;
         }
@@ -16300,8 +16299,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) new_type = GGML_TYPE_Q4_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_XSR || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXXL) new_type = GGML_TYPE_Q5_K;
         ++qs.i_attention_wv;
-    }
-    else if (name.find("ffn_gate") != std::string::npos) {
+    } else if (name.find("ffn_gate") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
         int i_layer = info.first, n_layer = info.second;
         if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_L && (difquant_half_tensors(i_layer, n_layer))) new_type = GGML_TYPE_Q3_K;
@@ -16319,8 +16317,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXL && (difquant_half_tensors(i_layer, n_layer))) new_type = GGML_TYPE_IQ4_XS;
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXXL && (difquant_six_eights_tensors(i_layer, n_layer))) new_type = GGML_TYPE_IQ4_XS;
         ++qs.i_ffn_gate;
-    }
-    else if (name.find("ffn_up") != std::string::npos) {
+    } else if (name.find("ffn_up") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
         int i_layer = info.first, n_layer = info.second;
         if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_L && (difquant_half_tensors(i_layer, n_layer))) new_type = GGML_TYPE_Q3_K;
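For context: llama_tensor_get_type picks a quantization type per tensor by matching on the tensor name, and this commit only cuddles each `else if` onto the preceding closing brace. Below is a minimal, self-contained C++ sketch of that dispatch pattern in the harmonized style. pick_type, the enum, and the body of difquant_half_tensors are hypothetical stand-ins for illustration, not this fork's actual implementation.

#include <string>

// Hypothetical stand-in for the fork's difquant_half_tensors helper; assumed
// here to flag roughly the first half of the layers for a bumped-up quant.
static bool difquant_half_tensors(int i_layer, int n_layer) {
    return i_layer < n_layer / 2;
}

enum sketch_type { SKETCH_Q3_K, SKETCH_Q4_K, SKETCH_DEFAULT };

// Simplified name-based dispatch chain in the harmonized `} else if` style.
static sketch_type pick_type(const std::string & name, int i_layer, int n_layer) {
    sketch_type new_type = SKETCH_DEFAULT;
    if (name.find("attn_qkv.weight") != std::string::npos) {
        new_type = SKETCH_Q3_K;
    } else if (name.find("ffn_gate") != std::string::npos) {
        if (difquant_half_tensors(i_layer, n_layer)) new_type = SKETCH_Q4_K;
    } else if (name.find("ffn_up") != std::string::npos) {
        if (difquant_half_tensors(i_layer, n_layer)) new_type = SKETCH_Q4_K;
    }
    return new_type;
}

Cuddling the else if keeps each branch's condition on the same line as the brace that closes the previous branch, which is the style this commit harmonizes the chain toward.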
