
Commit 9e4cf37

committed
convert-hf-to-gguf.py: print --> logger.debug or ValueError()
1 parent 70d4f42 commit 9e4cf37
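
Note: the logger.debug() calls introduced below assume a module-level logger is configured elsewhere in convert-hf-to-gguf.py. A minimal sketch of such a setup, assuming a "hf-to-gguf" logger name and a --verbose flag (both illustrative, not part of this commit):

# Minimal sketch, not the script's actual setup: a module-level logger whose
# debug records only appear when verbose output is requested.
import argparse
import logging

logger = logging.getLogger("hf-to-gguf")  # logger name is an assumption

def main() -> None:
    parser = argparse.ArgumentParser(description="convert a HF model to GGUF (sketch)")
    parser.add_argument("--verbose", action="store_true", help="enable debug logging")
    args = parser.parse_args()
    # debug-level records (like the per-tensor messages in this commit)
    # only appear when --verbose switches the level to DEBUG
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    logger.debug("debug logging enabled")

if __name__ == "__main__":
    main()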

File tree

1 file changed: +9 -13 lines changed


convert-hf-to-gguf.py

Lines changed: 9 additions & 13 deletions
@@ -1236,8 +1236,7 @@ def write_tensors(self):
             # map tensor names
             new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
-                print(f"Can not map tensor {name!r}")
-                sys.exit()
+                raise ValueError(f"Can not map tensor {name!r}")

             n_dims = len(data.shape)
             data_dtype = data.dtype

@@ -1254,7 +1253,7 @@ def write_tensors(self):
             if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
                 data = data.astype(np.float16)

-            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+            logger.debug(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")

             self.gguf_writer.add_tensor(new_name, data)

@@ -1270,16 +1269,15 @@ def _stack_qk_norm(self, block_count, name, tensor_map, n_head, norms, n_dims, l
             merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
             new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
-                print(f"Can not map tensor {name!r}")
-                sys.exit()
+                raise ValueError(f"Can not map tensor {name!r}")
             if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
                 data = data.astype(np.float32)

             # if f16 desired, convert any float32 2-dim weight tensors to float16
             if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
                 data = data.astype(np.float16)

-            print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+            logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")

             self.gguf_writer.add_tensor(new_name, data)

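The unchanged context lines in this hunk carry the ftype == 1 casting rules; restated as a standalone sketch (the helper name cast_for_f16_output is made up for illustration, not a function in the script):

import numpy as np

def cast_for_f16_output(data: np.ndarray, name: str, new_name: str, ftype: int) -> np.ndarray:
    # Restates the dtype rules visible in the hunk above; the helper name is hypothetical.
    n_dims = len(data.shape)
    # 1-D tensors and *_norm.weight stay float32 even when f16 output is requested
    if ftype == 1 and data.dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
        data = data.astype(np.float32)
    # if f16 desired, convert any float32 2-dim weight tensors to float16
    if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
        data = data.astype(np.float16)
    return data
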
@@ -1827,19 +1825,17 @@ def write_tensors(self):

                 new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
                 if new_name is None:
-                    print(f"Can not map tensor {name!r}")
-                    sys.exit()
+                    raise ValueError(f"Can not map tensor {name!r}")

-                print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+                logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")

                 self.gguf_writer.add_tensor(new_name, data)
                 continue

             # map tensor names
             new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
-                print(f"Can not map tensor {name!r}")
-                sys.exit()
+                raise ValueError(f"Can not map tensor {name!r}")

             n_dims = len(data.shape)
             data_dtype = data.dtype

@@ -1856,7 +1852,7 @@ def write_tensors(self):
             if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
                 data = data.astype(np.float16)

-            print(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+            logger.debug(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")

             self.gguf_writer.add_tensor(new_name, data)

@@ -2439,7 +2435,7 @@ def write_tensors(self):
             # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
             # To prevent errors, skip loading lm_head.weight.
             if name == "lm_head.weight":
-                print(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
+                logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
                 continue

             old_dtype = data_torch.dtype
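
The switch from print() + sys.exit() to raise ValueError(...) means a mapping failure carries a traceback and can be handled by the caller instead of silently ending the process. A sketch of how a driver might react; run_conversion and the model argument are illustrative names, not the script's real entry points:

import logging

logger = logging.getLogger("hf-to-gguf")

def run_conversion(model) -> int:
    # 'model' stands in for the script's Model instance; names here are illustrative.
    try:
        model.write_tensors()  # may raise ValueError("Can not map tensor ...")
    except ValueError as err:
        logger.error("conversion failed: %s", err)
        return 1  # exit status decided at the top level rather than sys.exit() deep in a method
    return 0

# typical driver usage: sys.exit(run_conversion(model))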
