Skip to content

Commit d92f6b4

Browse files
committed
llama : change fallback type IQ4_NL -> Q4_0
ggml-ci
1 parent 0da3fd2 commit d92f6b4

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

src/llama.cpp

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -17979,10 +17979,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
 17979 17979          case GGML_TYPE_IQ1_M:
 17980 17980          case GGML_TYPE_Q2_K:
 17981 17981          case GGML_TYPE_Q3_K:
 17982       -        case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
 17983       -        case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
 17984       -        case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
 17985       -        case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
       17982 +        case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_Q4_0; break;
       17983 +        case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
       17984 +        case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
       17985 +        case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
 17986 17986          default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
 17987 17987          }
 17988 17988          LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));

0 commit comments

Comments (0)