Skip to content

Commit 098c14e

Browse files
committed
Move print to logging: Fixes.
1 parent b16d543 commit 098c14e

File tree

1 file changed

+13
-12
lines changed

1 file changed

+13
-12
lines changed

convert_grok.py

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
"""
1212

1313
import argparse
14+
import logging
1415
import mmap
1516
import os
1617
import pathlib
@@ -34,8 +35,6 @@
3435

3536
import gguf
3637

37-
logger = logging.getLogger("convert_grok")
38-
3938
GGML_QK8_0 = 32
4039
GGML_QK4_0 = 32
4140
GGML_QK4_1 = 32
@@ -216,7 +215,7 @@ def dump_state_dict(f, ggml_type, input_dir, config):
216215
tensor_ggml_type,
217216
)
218217
weights[name] = weight, scales
219-
logger.info("Loaded", len(weight_names), "files")
218+
logging.debug("Loaded %i files", len(weight_names))
220219

221220
f.write_header_to_file()
222221
f.write_kv_data_to_file()
@@ -232,21 +231,23 @@ def dump_state_dict(f, ggml_type, input_dir, config):
232231
_, tensor_ggml_type = get_dtype_and_ggml_type(tensor, ggml_type)
233232
array = maybe_quantize_tensor(tensor, tensor_ggml_type).numpy()
234233

235-
logger.debug(
236-
f"dumping {name}:",
237-
f"{tensor_ggml_type.name}/{array.dtype}, {list(tensor.shape)}, {array.nbytes} bytes",
234+
logging.info(
235+
f"dumping {name}:"
236+
f"{tensor_ggml_type.name}/{array.dtype}, {list(tensor.shape)}, {array.nbytes} bytes"
238237
)
239238
f.write_tensor_data(array)
240239

241240
tensor_info.append((name, list(tensor.shape), tensor_ggml_type.name))
242241

243242
try:
244-
print(tabulate(tensor_info, headers=["name", "shape", "dtype"], tablefmt="psql")) # noqa: NP100
243+
print(
244+
tabulate(tensor_info, headers=["name", "shape", "dtype"], tablefmt="psql")
245+
) # noqa: NP100
245246
except NameError:
246247
pass
247248

248249
if len(tensor_info) != len(weight_names):
249-
logger.warning("Not all tensors are converted")
250+
logging.warning("Not all tensors are converted")
250251

251252

252253
def from_numpy(array):
@@ -379,7 +380,7 @@ def ffn_size(emb_size, widening_factor):
379380
config.num_experts = len(config.experts)
380381

381382
assert config.num_experts >= 2, "need at least 2 experts"
382-
logger.info("experts to export:", config.experts)
383+
logging.info("experts to export: %s", config.experts)
383384

384385
f = gguf.GGUFWriter(args.save_path, "grok", endianess=gguf.GGUFEndian.LITTLE)
385386

@@ -411,12 +412,12 @@ def ffn_size(emb_size, widening_factor):
411412

412413
delta = time.time() - start
413414

414-
logger.info(f"grok GGUF model saved to {args.save_path}. Total time {delta:.2f} sec")
415+
logging.info(f"grok GGUF model saved to {args.save_path}. Total time {delta:.2f} sec")
415416

416417

417418
def load_vocab(path):
418419
def load_spm(p):
419-
logger.info(f"Loading vocab file {p}")
420+
logging.info(f"Loading vocab file {p}")
420421
return SentencePieceVocab(p)
421422

422423
# Be extra-friendly and accept either a file or a directory. Also, if it's
@@ -452,7 +453,7 @@ def main():
452453
args = parser.parse_args()
453454

454455
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
455-
456+
456457
vocab = load_vocab(
457458
pathlib.Path(args.vocab_dir) if args.vocab_dir else pathlib.Path(args.input_dir)
458459
)

0 commit comments

Comments (0)