Commit e0af2df

convert-hf : support outtype templating in outfile name
* convert-hf : rename --outtype auto-f16 to --outtype auto
1 parent: d3d32a6

1 file changed: convert-hf-to-gguf.py (16 additions, 13 deletions)
@@ -48,28 +48,27 @@ class Model:
 
     dir_model: Path
     ftype: int
-    fname_out: Path
     is_big_endian: bool
     endianess: gguf.GGUFEndian
     use_temp_file: bool
     lazy: bool
     part_names: list[str]
     is_safetensors: bool
     hparams: dict[str, Any]
-    gguf_writer: gguf.GGUFWriter
     block_count: int
     tensor_map: gguf.TensorNameMap
     tensor_names: set[str] | None
+    fname_out: Path
+    gguf_writer: gguf.GGUFWriter
 
     # subclasses should define this!
     model_arch: gguf.MODEL_ARCH
 
-    def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool, eager: bool):
+    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, is_big_endian: bool, use_temp_file: bool, eager: bool):
         if type(self) is Model:
             raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")
         self.dir_model = dir_model
         self.ftype = ftype
-        self.fname_out = fname_out
         self.is_big_endian = is_big_endian
         self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
         self.use_temp_file = use_temp_file
@@ -79,7 +78,6 @@ def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian:
         if not self.is_safetensors:
             self.part_names = Model.get_model_part_names(self.dir_model, ".bin")
         self.hparams = Model.load_hparams(self.dir_model)
-        self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file)
         self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"])
         self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
         self.tensor_names = None
@@ -92,6 +90,11 @@ def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian:
             else:
                 logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
                 self.ftype = gguf.LlamaFileType.MOSTLY_BF16
+        ftype_up: str = self.ftype.name.partition("_")[2].upper()
+        ftype_lw: str = ftype_up.lower()
+        # allow templating the file name with the output ftype, useful with the "auto" ftype
+        self.fname_out = fname_out.parent / fname_out.name.format(ftype_lw, outtype=ftype_lw, ftype=ftype_lw, OUTTYPE=ftype_up, FTYPE=ftype_up)
+        self.gguf_writer = gguf.GGUFWriter(self.fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file)
 
     @classmethod
     def __init_subclass__(cls):
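
Note: this added block is the heart of the commit. Once the "auto" detection has settled on a concrete ftype, Model.__init__ derives the lowercase and uppercase type names from the enum member and expands any placeholder in the requested file name. A minimal standalone sketch of the same string handling, where a plain string and a hypothetical path stand in for gguf.LlamaFileType and the real output directory:

    from pathlib import Path

    # stand-in for self.ftype.name; gguf.LlamaFileType members are named
    # like ALL_F32, MOSTLY_F16, MOSTLY_BF16
    ftype_name = "MOSTLY_BF16"

    # partition("_")[2] keeps everything after the first underscore:
    # MOSTLY_BF16 -> BF16, ALL_F32 -> F32
    ftype_up = ftype_name.partition("_")[2].upper()
    ftype_lw = ftype_up.lower()

    # hypothetical output path containing a {ftype} placeholder
    fname_out = Path("models") / "ggml-model-{ftype}.gguf"

    # str.format fills {}, {outtype}, {ftype}, {OUTTYPE} and {FTYPE};
    # a name with no placeholders comes back unchanged, so plain
    # --outfile values keep working
    final = fname_out.parent / fname_out.name.format(
        ftype_lw, outtype=ftype_lw, ftype=ftype_lw, OUTTYPE=ftype_up, FTYPE=ftype_up)
    print(final)  # models/ggml-model-bf16.gguf
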
@@ -2400,11 +2403,11 @@ def parse_args() -> argparse.Namespace:
     )
     parser.add_argument(
         "--outfile", type=Path,
-        help="path to write to; default: based on input",
+        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
     )
     parser.add_argument(
-        "--outtype", type=str, choices=["f32", "f16", "bf16", "auto-f16"], default="f16",
-        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, auto-f16 for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
+        "--outtype", type=str, choices=["f32", "f16", "bf16", "auto"], default="f16",
+        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
     )
     parser.add_argument(
         "--bigendian", action="store_true",
@@ -2462,14 +2465,14 @@ def main() -> None:
         "f32": gguf.LlamaFileType.ALL_F32,
         "f16": gguf.LlamaFileType.MOSTLY_F16,
         "bf16": gguf.LlamaFileType.MOSTLY_BF16,
-        "auto-f16": gguf.LlamaFileType.GUESSED, # TODO: use a more appropriate "auto" type
+        "auto": gguf.LlamaFileType.GUESSED,
     }
 
     if args.outfile is not None:
         fname_out = args.outfile
     else:
         # output in the same directory as the model by default
-        fname_out = dir_model / f'ggml-model-{args.outtype}.gguf'
+        fname_out = dir_model / 'ggml-model-{ftype}.gguf'
 
     logger.info(f"Loading model: {dir_model.name}")
 
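
Two details interact here: "auto" maps to the GUESSED sentinel, and the default output name becomes a literal {ftype} template rather than an f-string, because the concrete ftype is only known after Model.__init__ has inspected the first tensor. A reduced sketch of that flow, where the enum is a stand-in with illustrative values rather than the real gguf.LlamaFileType:

    import enum

    class LlamaFileType(enum.IntEnum):
        # reduced stand-in for gguf.LlamaFileType; member values are illustrative
        ALL_F32 = 0
        MOSTLY_F16 = 1
        MOSTLY_BF16 = 2
        GUESSED = 3

    ftype_map = {
        "f32": LlamaFileType.ALL_F32,
        "f16": LlamaFileType.MOSTLY_F16,
        "bf16": LlamaFileType.MOSTLY_BF16,
        "auto": LlamaFileType.GUESSED,
    }

    def resolve_ftype(outtype: str, first_tensor_dtype: str) -> LlamaFileType:
        ftype = ftype_map[outtype]
        if ftype == LlamaFileType.GUESSED:
            # mirrors the detection shown above in Model.__init__:
            # float16 tensors keep f16, anything else falls back to bf16
            ftype = (LlamaFileType.MOSTLY_F16 if first_tensor_dtype == "float16"
                     else LlamaFileType.MOSTLY_BF16)
        return ftype

    print(resolve_ftype("auto", "bfloat16").name)  # MOSTLY_BF16
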
@@ -2488,13 +2491,13 @@ def main() -> None:
     model_instance.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)
 
     if args.vocab_only:
-        logger.info(f"Exporting model vocab to '{fname_out}'")
+        logger.info(f"Exporting model vocab to '{model_instance.fname_out}'")
         model_instance.write_vocab()
     else:
-        logger.info(f"Exporting model to '{fname_out}'")
+        logger.info(f"Exporting model to '{model_instance.fname_out}'")
         model_instance.write()
 
-    logger.info(f"Model successfully exported to '{fname_out}'")
+    logger.info(f"Model successfully exported to '{model_instance.fname_out}'")
 
 
 if __name__ == '__main__':
