@@ -1236,8 +1236,7 @@ def write_tensors(self):
             # map tensor names
             new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
-                print(f"Can not map tensor {name!r}")
-                sys.exit()
+                raise ValueError(f"Can not map tensor {name!r}")
 
             n_dims = len(data.shape)
             data_dtype = data.dtype
@@ -1254,7 +1253,7 @@ def write_tensors(self):
             if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
                 data = data.astype(np.float16)
 
-            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+            logger.debug(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
 
             self.gguf_writer.add_tensor(new_name, data)
 
@@ -1270,16 +1269,15 @@ def _stack_qk_norm(self, block_count, name, tensor_map, n_head, norms, n_dims, l
             merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight"
             new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
-                print(f"Can not map tensor {name!r}")
-                sys.exit()
+                raise ValueError(f"Can not map tensor {name!r}")
             if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
                 data = data.astype(np.float32)
 
             # if f16 desired, convert any float32 2-dim weight tensors to float16
             if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2:
                 data = data.astype(np.float16)
 
-            print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+            logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
 
             self.gguf_writer.add_tensor(new_name, data)
 
@@ -1827,19 +1825,17 @@ def write_tensors(self):
 
             new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
-                print(f"Can not map tensor {name!r}")
-                sys.exit()
+                raise ValueError(f"Can not map tensor {name!r}")
 
-            print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+            logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
 
             self.gguf_writer.add_tensor(new_name, data)
             continue
 
             # map tensor names
             new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
-                print(f"Can not map tensor {name!r}")
-                sys.exit()
+                raise ValueError(f"Can not map tensor {name!r}")
 
             n_dims = len(data.shape)
             data_dtype = data.dtype
@@ -1856,7 +1852,7 @@ def write_tensors(self):
             if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
                 data = data.astype(np.float16)
 
-            print(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+            logger.debug(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
 
             self.gguf_writer.add_tensor(new_name, data)
 
@@ -2439,7 +2435,7 @@ def write_tensors(self):
             # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
             # To prevent errors, skip loading lm_head.weight.
             if name == "lm_head.weight":
-                print(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
+                logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.")
                 continue
 
             old_dtype = data_torch.dtype
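
Note: the logger.debug calls introduced in this diff assume a module-level logger is configured elsewhere in convert-hf-to-gguf.py. A minimal sketch of such a setup, with an illustrative logger name and a hypothetical verbose flag (not the exact code in this commit):

import logging

# Illustrative logger name; the actual script may use a different one.
logger = logging.getLogger("hf-to-gguf")

def setup_logging(verbose: bool = False) -> None:
    # DEBUG re-enables the per-tensor lines that were previously plain print() output;
    # INFO keeps the default output quieter.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)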