From 13a39058d3d8bb8aaaf1a981e90d6edf629dc944 Mon Sep 17 00:00:00 2001
From: Johannes Gäßler
Date: Sun, 10 Mar 2024 19:50:11 +0100
Subject: [PATCH] quantize: fix F16/F32 downcast to q6_K

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 249442166d24e..68694965b986f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11675,7 +11675,7 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
                  ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
             new_type = GGML_TYPE_Q5_K;
         }
-        else if (new_type != GGML_TYPE_Q8_0) {
+        else if (new_type != GGML_TYPE_Q8_0 && new_type != GGML_TYPE_F16 && new_type != GGML_TYPE_F32) {
             new_type = GGML_TYPE_Q6_K;
         }
     } else if (name == "token_embd.weight") {
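
Note: the following is a minimal standalone sketch of the guard this patch adds, not the llama.cpp implementation. The TensorType enum and pick_output_type helper are simplified stand-ins for GGML_TYPE_* and get_k_quant_type, used only to illustrate that tensors kept at F16/F32 are no longer downcast to Q6_K in this branch.

    // sketch.cpp -- illustrative only, names are assumptions
    #include <cstdio>

    enum class TensorType { F32, F16, Q8_0, Q6_K, Q5_K, Q4_K };

    // For the output tensor, bump the chosen type up to Q6_K -- but only when
    // the tensor is actually being quantized. F16/F32 keep full precision.
    static TensorType pick_output_type(TensorType requested) {
        if (requested != TensorType::Q8_0 &&
            requested != TensorType::F16  &&
            requested != TensorType::F32) {
            return TensorType::Q6_K;
        }
        return requested;
    }

    int main() {
        // Before the fix, F16 here would have been silently downcast to Q6_K.
        printf("F16  -> %d\n", static_cast<int>(pick_output_type(TensorType::F16)));
        printf("Q4_K -> %d\n", static_cast<int>(pick_output_type(TensorType::Q4_K)));
        return 0;
    }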