
Commit d974aed

convert : print -> logging
ggml-ci
1 parent 26f606e

2 files changed: +17 -17 lines


convert-hf-to-gguf-update.py

Lines changed: 16 additions & 16 deletions
@@ -159,8 +159,8 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         chktok = tokenizer.encode(chktxt)
         chkhsh = sha256(str(chktok).encode()).hexdigest()

-        print(f"chktok: {{chktok}}")
-        print(f"chkhsh: {{chkhsh}}")
+        logger.debug(f"chktok: {{chktok}}")
+        logger.debug(f"chkhsh: {{chkhsh}}")

         res = None

@@ -169,22 +169,22 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         # don't edit the hashes manually!
 {src_ifs}
         if res is None:
-            print("\\n")
-            print("**************************************************************************************")
-            print("** WARNING: The BPE pre-tokenizer was not recognized!")
-            print("** There are 2 possible reasons for this:")
-            print("** - the model has not been added to convert-hf-to-gguf-update.py yet")
-            print("** - the pre-tokenization config has changed upstream")
-            print("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
-            print("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
-            print("**")
-            print(f"** chkhsh: {{chkhsh}}")
-            print("**************************************************************************************")
-            print("\\n")
+            logger.warning("\\n")
+            logger.warning("**************************************************************************************")
+            logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
+            logger.warning("** There are 2 possible reasons for this:")
+            logger.warning("** - the model has not been added to convert-hf-to-gguf-update.py yet")
+            logger.warning("** - the pre-tokenization config has changed upstream")
+            logger.warning("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
+            logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
+            logger.warning("**")
+            logger.warning(f"** chkhsh: {{chkhsh}}")
+            logger.warning("**************************************************************************************")
+            logger.warning("\\n")

             raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

-        print(f"tokenizer.ggml.pre: {{repr(res)}}")
-        print(f"chkhsh: {{chkhsh}}")
+        logger.debug(f"tokenizer.ggml.pre: {{repr(res)}}")
+        logger.debug(f"chkhsh: {{chkhsh}}")

         return res
 """

convert-hf-to-gguf.py

Lines changed: 1 addition & 1 deletion
@@ -327,7 +327,7 @@ def get_vocab_base_pre(self, tokenizer) -> str:
             logger.warning("\n")
             raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")

-        logger.debug(f"tokenizer.ggml.pre: {res}")
+        logger.debug(f"tokenizer.ggml.pre: {repr(res)}")
         logger.debug(f"chkhsh: {chkhsh}")

         return res
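
The one-line change above brings this file in line with the template in convert-hf-to-gguf-update.py: logging the value via repr(res) rather than res makes it unambiguous whether the pre-tokenizer name is a string or None. A quick illustration of the difference (the sample value is only an example):

import logging

logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logger = logging.getLogger(__name__)

res = "llama-bpe"  # example value, for illustration only
logger.debug(f"tokenizer.ggml.pre: {res}")        # tokenizer.ggml.pre: llama-bpe
logger.debug(f"tokenizer.ggml.pre: {repr(res)}")  # tokenizer.ggml.pre: 'llama-bpe'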
