#!/usr/bin/env python3
from __future__ import annotations

import argparse
import json
import logging
import os
import sys
from json import JSONDecodeError
from pathlib import Path
from typing import Any, Dict, List

import numpy as np
import torch
from transformers import AutoTokenizer

if "NO_LOCAL_GGUF" not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / "gguf-py" / "gguf"))
import gguf
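# NOTE: the path insertion above prefers the gguf-py package bundled with the
# repository over a pip-installed copy; set NO_LOCAL_GGUF in the environment to
# skip it and use the installed package instead.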

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")


def check_required_files(directory: Path, required_files: List[str]) -> None:
    missing_files = [
        file_name
        for file_name in required_files
        if not (directory / file_name).exists()
    ]
    if missing_files:
        raise FileNotFoundError(f"Missing required files: {', '.join(missing_files)}")


def get_json_map(file_path: Path) -> dict[str, Any]:
    with open(file_path, "r") as source_file:
        try:
            return json.load(source_file)
        except JSONDecodeError as exc:
            raise ValueError(f"Failed to decode {file_path}") from exc


def load_hyper_params(directory: Path, architecture: str) -> dict[str, Any]:
    config_path = directory / "config.json"
    hparams = get_json_map(config_path)

    # Ensure the config declares the expected architecture
    if hparams["architectures"][0] != architecture:
        raise ValueError(
            f"Model architecture not supported: {hparams['architectures'][0]}"
        )

    return hparams


def initialize_writer(
    fname_out: Path, architecture: str, ftype: str, hparams: Dict[str, Any]
) -> gguf.GGUFWriter:
    """
    Initializes the GGUF writer with the model metadata.

    :param fname_out: The path of the output model file.
    :param architecture: The model architecture enum name.
    :param ftype: The data type for the model file ('f32' or 'f16').
    :param hparams: The hyperparameters loaded from the model's config file.
    :return: An initialized GGUF writer object.
    """
    # Validate the architecture name
    if not hasattr(gguf.MODEL_ARCH, architecture):
        raise ValueError(f"Unsupported architecture: {architecture}")
    ARCH = getattr(gguf.MODEL_ARCH, architecture)

    # Validate the file type (case-insensitive; parse_args produces lowercase)
    ftype = ftype.lower()
    if ftype not in ("f32", "f16"):
        raise ValueError(f"Unsupported file type: {ftype}")

    # Initialize the GGUF writer
    gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])

    # Set the writer with the hyperparameters from MixFormerSequentialConfig
    gguf_writer.add_name(gguf.MODEL_ARCH_NAMES[ARCH])
    gguf_writer.add_context_length(hparams.get("n_positions", 2048))
    gguf_writer.add_embedding_length(hparams.get("n_embd", 1024))
    # "n_inner" may be present but null in config.json; `or` covers both cases
    n_inner = hparams.get("n_inner") or 4 * hparams.get("n_embd", 1024)
    gguf_writer.add_feed_forward_length(n_inner)
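    # Example (hypothetical values): with "n_embd": 1024 and "n_inner" absent
    # or null, n_inner defaults to 4 * 1024 = 4096, the conventional 4x MLP
    # expansion in GPT-style transformer blocks.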
    gguf_writer.add_block_count(hparams.get("n_layer", 20))
    gguf_writer.add_head_count(hparams.get("n_head", 16))
    n_head_kv = hparams.get("n_head_kv", hparams.get("n_head", 16))
    gguf_writer.add_head_count_kv(n_head_kv)  # NOTE: arxiv:2203.11082
    gguf_writer.add_layer_norm_eps(hparams.get("layer_norm_epsilon", 1e-5))

    # Add the file type; GGUF records it numerically (0 = all F32, 1 = mostly F16)
    gguf_writer.add_file_type(0 if ftype == "f32" else 1)

    return gguf_writer


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Convert a Phi-1 model to a GGML-compatible file"
    )
    parser.add_argument(
        "--vocab-only", action="store_true", help="extract only the vocab"
    )
    parser.add_argument(
        "--outfile", type=Path, help="path to write to; default: based on input"
    )
    parser.add_argument(
        "model",
        type=Path,
        help="directory containing model file, or model file itself (*.bin)",
    )
    parser.add_argument(
        "--ftype",
        type=str,
        choices=["f32", "f16"],
        default="f16",  # NOTE: Phi-1 is dtype float16.
        help="output format: 'f32' for 32-bit tensors, 'f16' for 16-bit tensors",
    )
    return parser.parse_args()
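

# Example invocation (paths are hypothetical):
#   python convert-phi-1-to-gguf.py ./phi-1 --ftype f16
# By default this writes ./phi-1/ggml-model-f16.gguf; pass --outfile to override.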


def main():
    try:
        args = parse_args()

        ftype = args.ftype
        directory = args.model

        if not directory.is_dir():
            raise NotADirectoryError(f"{directory} is not a directory.")

        required_files = ["pytorch_model.bin", "config.json", "tokenizer.json"]
        check_required_files(directory, required_files)

        # Reference the actual model file (existence already verified above)
        model = directory / "pytorch_model.bin"

        hparams = load_hyper_params(directory, "MixFormerSequentialForCausalLM")
        architecture = hparams["architectures"][0]

        if args.outfile is not None:
            fname_out = args.outfile
        else:
            fname_out = directory / f"ggml-model-{ftype}.gguf"

        if not fname_out.parent.exists():
            raise FileNotFoundError(
                f"Output directory {fname_out.parent} does not exist."
            )

        # NOTE: initialize_writer expects `architecture` to name a
        # gguf.MODEL_ARCH member; the HF class name taken from config.json may
        # need to be mapped to one before this call.
        gguf_writer = initialize_writer(fname_out, architecture, ftype, hparams)

        # TODO: export the vocab and tensors from 'model' via gguf_writer;
        # see the commented-out draft below.

    except Exception as e:
        logging.error(e)
        sys.exit(1)


if __name__ == "__main__":
    main()


# The sections below are a commented-out draft of the vocab and tensor export,
# apparently adapted from the existing convert scripts (see the
# falcon_convert.py reference below); names such as dir_model, ARCH,
# block_count, num_parts, and gguf_writer come from that earlier context and
# are not defined above.

# # TOKENIZATION

# print("gguf: get tokenizer metadata")

# tokens: list[str] = []
# scores: list[float] = []
# toktypes: list[int] = []

# # gpt2 tokenizer
# gguf_writer.add_tokenizer_model("gpt2")

# print("gguf: get gpt2 tokenizer vocab")

# # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py
# tokenizer = AutoTokenizer.from_pretrained(dir_model)

# # The number of tokens in tokenizer.json can differ from the expected vocab size.
# # This causes downstream issues with mismatched tensor sizes when running inference.
# vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
# assert max(tokenizer.vocab.values()) < vocab_size

# added_vocab = tokenizer.get_added_vocab()
# reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}

# for i in range(vocab_size):
#     if i not in reverse_vocab:
#         tokens.append(f"[PAD{i}]")
#         toktypes.append(gguf.TokenType.USER_DEFINED)
#     elif reverse_vocab[i] in added_vocab:
#         tokens.append(reverse_vocab[i])
#         if tokenizer.added_tokens_decoder[i].special:
#             toktypes.append(gguf.TokenType.CONTROL)
#         else:
#             toktypes.append(gguf.TokenType.USER_DEFINED)
#     else:
#         tokens.append(reverse_vocab[i])
#         toktypes.append(gguf.TokenType.NORMAL)
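
# # For example (hypothetical counts): if tokenizer.json defines 50,295 tokens
# # but config.json declares vocab_size = 51,200, the loop above appends
# # [PAD50295] .. [PAD51199] so the token list lines up with the embedding rows.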

# gguf_writer.add_token_list(tokens)
# gguf_writer.add_token_types(toktypes)
# special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab=len(tokens))
# special_vocab.add_to_gguf(gguf_writer)

# # TENSORS

# tensor_map = gguf.get_tensor_name_map(ARCH, block_count)

# # params for qkv transform
# n_head = hparams["n_head"]
# n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1

# head_dim = hparams["n_embd"] // n_head

# # tensor info
# print("gguf: get tensor metadata")

# if num_parts == 0:
#     part_names = iter(("pytorch_model.bin",))
# else:
#     part_names = (
#         f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
#     )
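# # With num_parts = 2, for instance, the generator above yields
# # pytorch_model-00001-of-00002.bin and pytorch_model-00002-of-00002.bin,
# # matching the Hugging Face sharded-checkpoint naming convention.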

# for part_name in part_names:
#     if args.vocab_only:
#         break
#     print("gguf: loading model part '" + part_name + "'")
#     model_part = torch.load(dir_model / part_name, map_location="cpu")

#     for name in model_part.keys():
#         data = model_part[name]

#         old_dtype = data.dtype

#         # convert any unsupported data types to float32
#         if data.dtype != torch.float16 and data.dtype != torch.float32:
#             data = data.to(torch.float32)

#         data = data.squeeze().numpy()

#         # map tensor names
#         new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
#         if new_name is None:
#             print("Cannot map tensor '" + name + "'")
#             sys.exit(1)

#         n_dims = len(data.shape)
#         data_dtype = data.dtype

#         # if f32 desired, convert any float16 to float32
#         if ftype == 0 and data_dtype == np.float16:
#             data = data.astype(np.float32)

#         # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
#         if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
#             data = data.astype(np.float32)

#         # if f16 desired, convert any float32 2-dim weight tensors to float16
#         if (
#             ftype == 1
#             and data_dtype == np.float32
#             and name.endswith(".weight")
#             and n_dims == 2
#         ):
#             data = data.astype(np.float16)
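
#         # Net effect of the three branches above: with f16 output requested,
#         # 2-D weight matrices are stored as f16 while 1-D tensors (biases,
#         # norms) are kept as f32.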

#         print(
#             f"{name} => {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}"
#         )

#         gguf_writer.add_tensor(new_name, data)


# print("gguf: write header")
# gguf_writer.write_header_to_file()
# print("gguf: write metadata")
# gguf_writer.write_kv_data_to_file()
# if not args.vocab_only:
#     print("gguf: write tensors")
#     gguf_writer.write_tensors_to_file()

# gguf_writer.close()

# print(f"gguf: model successfully exported to '{fname_out}'")
# print("")