Skip to content

Commit ced44be

Browse files
authored
llama-chat : fix wrong template in GLM4-0414 (#13140)
* fix wrong template in GLM4-0414
* fix spaces
* no bos token since it is already in the template
* moved the chatgml4 check to higher priority
* restored template for old GLM models
* moved the GLM4 template check in the correct place with correct check
1 parent e291450 commit ced44be

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

convert_hf_to_gguf.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -5154,7 +5154,7 @@ def set_vocab(self):
5154 5154
special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
5155 5155
special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
5156 5156
special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
5157 -
special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["[gMASK]"])
5157 +
special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"])
5158 5158
special_vocab.add_to_gguf(self.gguf_writer)
5159 5159

5160 5160
def set_gguf_parameters(self):

src/llama-chat.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -122,6 +122,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
122 122
}
123 123
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
124 124
return LLM_CHAT_TEMPLATE_PHI_3;
125 +
} else if (tmpl_contains("[gMASK]<sop>")) {
126 +
return LLM_CHAT_TEMPLATE_CHATGML_4;
125 127
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
126 128
return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
127 129
} else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) {
@@ -155,8 +157,6 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
155 157
} else if (tmpl_contains("[gMASK]sop")) {
156 158
// chatglm3-6b
157 159
return LLM_CHAT_TEMPLATE_CHATGML_3;
158 -
} else if (tmpl_contains("[gMASK]<sop>")) {
159 -
return LLM_CHAT_TEMPLATE_CHATGML_4;
160 160
} else if (tmpl_contains(LU8("<用户>"))) {
161 161
// MiniCPM-3B-OpenHermes-2.5-v2-GGUF
162 162
return LLM_CHAT_TEMPLATE_MINICPM;

0 commit comments

Comments (0)