
Commit d7e199e

convert-hf : support q8_0 conversion
1 parent 5a41992 commit d7e199e

File tree: 5 files changed (+157, -57 lines)


convert-hf-to-gguf.py

Lines changed: 22 additions & 44 deletions
@@ -240,23 +240,6 @@ def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: i
         return False

     def write_tensors(self):
-        # same as ggml_compute_fp32_to_bf16 in ggml-impl.h
-        def np_fp32_to_bf16(n: np.ndarray):
-            # force nan to quiet
-            n = np.where((n & 0x7fffffff) > 0x7f800000, (n & 0xffff0000) | (64 << 16), n)
-            # flush subnormals to zero
-            n = np.where((n & 0x7f800000) == 0, n & 0x80000000, n)
-            # round to nearest even
-            n = (n + (0x7fff + ((n >> 16) & 1))) >> 16
-            return n.astype(np.int16)
-
-        # Doing this row-wise is much, much faster than element-wise, hence the signature
-        v_fp32_to_bf16 = np.vectorize(np_fp32_to_bf16, otypes=[np.int16], signature="(n)->(n)")
-        if self.lazy:
-            # TODO: find a way to implicitly wrap np.vectorize functions
-            # NOTE: the type is changed to reflect otypes passed to np.vectorize above
-            v_fp32_to_bf16 = gguf.LazyNumpyTensor._wrap_fn(v_fp32_to_bf16, meta_noop=np.int16)
-
         max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

         for name, data_torch in self.get_tensors():
@@ -309,27 +292,31 @@ def np_fp32_to_bf16(n: np.ndarray):
             ))

             if self.ftype != gguf.LlamaFileType.ALL_F32 and extra_f16 and not extra_f32:
-                if self.ftype == gguf.LlamaFileType.MOSTLY_F16:
+                if self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
+                    data = gguf.quantize_bf16(data)
+                    assert data.dtype == np.int16
+                    data_qtype = gguf.GGMLQuantizationType.BF16
+
+                elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0 and gguf.can_quantize_to_q8_0(data):
+                    data = gguf.quantize_q8_0(data)
+                    assert data.dtype == np.uint8
+                    data_qtype = gguf.GGMLQuantizationType.Q8_0
+
+                else:  # default to float16 for quantized tensors
                     if data_dtype != np.float16:
                         data = data.astype(np.float16)
                     data_qtype = gguf.GGMLQuantizationType.F16

-                elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
-                    if data_dtype != np.float32:
-                        data = data.astype(np.float32)
-                    data = v_fp32_to_bf16(data.view(np.int32))
-                    assert data.dtype == np.int16
-                    data_qtype = gguf.GGMLQuantizationType.BF16
-
-            else:  # by default, convert to float32
+            if data_qtype is None:  # by default, convert to float32
                 if data_dtype != np.float32:
                     data = data.astype(np.float32)
                 data_qtype = gguf.GGMLQuantizationType.F32

-            assert data_qtype is not None
-
+            block_size, type_size = gguf.GGML_QUANT_SIZES[data_qtype]
             # reverse shape to make it similar to the internal ggml dimension order
-            shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}"
+            shape_str = f"""{{{', '.join(str(n) for n in reversed(
+                (*data.shape[:-1], data.shape[-1] * data.dtype.itemsize // type_size * block_size))
+            )}}}"""

             # n_dims is implicit in the shape
             logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")
@@ -2415,25 +2402,15 @@ class LazyTorchTensor(gguf.LazyBase):
     def numpy(self) -> gguf.LazyNumpyTensor:
         dtype = self._dtype_map[self.dtype]
         return gguf.LazyNumpyTensor(
-            meta=np.lib.stride_tricks.as_strided(np.zeros(1, dtype), self.shape, (0 for _ in self.shape)),
+            meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
             lazy=self._lazy,
             args=(self,),
             func=(lambda s: s[0].numpy())
         )

     @classmethod
-    def eager_to_meta(cls, t: Tensor) -> Tensor:
-        if t.is_meta:
-            return t
-        return t.detach().to("meta")
-
-    @classmethod
-    def meta_with_dtype(cls, m: Tensor, dtype: torch.dtype) -> Tensor:
-        m = m.detach()
-        if not m.is_meta:
-            m = m.to("meta")
-        m.dtype = dtype
-        return m
+    def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: torch.Size) -> Tensor:
+        return torch.empty(size=shape, dtype=dtype, device="meta")

     @classmethod
     def __torch_function__(cls, func, types, args=(), kwargs=None):
@@ -2464,8 +2441,8 @@ def parse_args() -> argparse.Namespace:
         help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
     )
     parser.add_argument(
-        "--outtype", type=str, choices=["f32", "f16", "bf16", "auto"], default="f16",
-        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
+        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16",
+        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
     )
     parser.add_argument(
         "--bigendian", action="store_true",
@@ -2523,6 +2500,7 @@ def main() -> None:
         "f32": gguf.LlamaFileType.ALL_F32,
         "f16": gguf.LlamaFileType.MOSTLY_F16,
         "bf16": gguf.LlamaFileType.MOSTLY_BF16,
+        "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
         "auto": gguf.LlamaFileType.GUESSED,
     }
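
With the new q8_0 choice wired into --outtype and the ftype map, a conversion would presumably be started with something along the lines of "python convert-hf-to-gguf.py <model-dir> --outtype q8_0" (the positional model-directory argument and output path are illustrative, not shown in this diff). The other subtle change above is the logged shape string: quantized data travels through write_tensors as a flat uint8 buffer, so the innermost dimension is converted from bytes back to an element count before logging. A minimal sketch of that arithmetic, with a hypothetical stand-in for gguf.GGML_QUANT_SIZES and made-up tensor shapes:

# A sketch (not part of the commit) of how the new shape_str recovers logical
# element counts from the possibly-quantized buffer. The (block_size, type_size)
# values assume ggml's F16 and Q8_0 sizes; the shapes are arbitrary examples.
import numpy as np

QUANT_SIZES = {"F16": (1, 2), "Q8_0": (32, 34)}  # stand-in for gguf.GGML_QUANT_SIZES

def logged_shape(data: np.ndarray, qtype: str) -> str:
    block_size, type_size = QUANT_SIZES[qtype]
    # innermost dim: bytes -> elements, then reverse to ggml's dimension order
    dims = (*data.shape[:-1], data.shape[-1] * data.dtype.itemsize // type_size * block_size)
    return f"{{{', '.join(str(n) for n in reversed(dims))}}}"

f16_data = np.zeros((4, 64), dtype=np.float16)
q8_data = np.zeros((4, 64 // 32 * 34), dtype=np.uint8)  # 64 floats pack into 68 bytes per row
assert logged_shape(f16_data, "F16") == "{64, 4}"
assert logged_shape(q8_data, "Q8_0") == "{64, 4}"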

gguf-py/gguf/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -2,5 +2,6 @@
 from .lazy import *
 from .gguf_reader import *
 from .gguf_writer import *
+from .quants import *
 from .tensor_mapping import *
 from .vocab import *

gguf-py/gguf/gguf_writer.py

Lines changed: 11 additions & 5 deletions
@@ -13,6 +13,7 @@
 import numpy as np

 from .constants import (
+    GGML_QUANT_SIZES,
     GGUF_DEFAULT_ALIGNMENT,
     GGUF_MAGIC,
     GGUF_VERSION,
@@ -195,7 +196,7 @@ def ggml_pad(x: int, n: int) -> int:
         return ((x + n - 1) // n) * n

     def add_tensor_info(
-        self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype[np.float16] | np.dtype[np.float32],
+        self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype,
         tensor_nbytes: int, raw_dtype: GGMLQuantizationType | None = None,
     ) -> None:
         if self.state is not WriterState.EMPTY:
@@ -208,10 +209,6 @@ def add_tensor_info(
         encoded_name = name.encode("utf-8")
         self.ti_data += self._pack("Q", len(encoded_name))
         self.ti_data += encoded_name
-        n_dims = len(tensor_shape)
-        self.ti_data += self._pack("I", n_dims)
-        for i in range(n_dims):
-            self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i])
         if raw_dtype is None:
             if tensor_dtype == np.float16:
                 dtype = GGMLQuantizationType.F16
@@ -231,6 +228,15 @@ def add_tensor_info(
                 raise ValueError("Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now")
         else:
             dtype = raw_dtype
+            if tensor_dtype == np.uint8:
+                block_size, type_size = GGML_QUANT_SIZES[raw_dtype]
+                if tensor_shape[-1] % type_size != 0:
+                    raise ValueError(f"Quantized tensor row size ({tensor_shape[-1]}) is not a multiple of {dtype.name} type size ({type_size})")
+                tensor_shape = tuple(tensor_shape[:-1]) + (tensor_shape[-1] // type_size * block_size,)
+        n_dims = len(tensor_shape)
+        self.ti_data += self._pack("I", n_dims)
+        for i in range(n_dims):
+            self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i])
         self.ti_data += self._pack("I", dtype)
         self.ti_data += self._pack("Q", self.offset_tensor)
         self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment)
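
To make the writer-side change concrete, here is a small standalone sketch (not the gguf-py API itself) of what the new uint8 branch in add_tensor_info does for a Q8_0 tensor: the byte length of a row is checked against the type size, converted back to an element count, and the dimensions are then packed into ti_data innermost-first as before. It assumes ggml's Q8_0 sizes (block_size = 32, type_size = 34); the shapes are examples.

def gguf_dims_for_q8_0(tensor_shape: tuple[int, ...]) -> tuple[int, ...]:
    block_size, type_size = 32, 34
    if tensor_shape[-1] % type_size != 0:
        raise ValueError(f"Quantized tensor row size ({tensor_shape[-1]}) is not a multiple of type size ({type_size})")
    # bytes -> elements for the innermost dimension
    tensor_shape = tuple(tensor_shape[:-1]) + (tensor_shape[-1] // type_size * block_size,)
    # ti_data stores n_dims, then the dimensions in reverse order (innermost first)
    return tuple(reversed(tensor_shape))

assert gguf_dims_for_q8_0((4096, 15232)) == (14336, 4096)  # 15232 bytes = 448 blocks = 14336 elements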

gguf-py/gguf/lazy.py

Lines changed: 14 additions & 8 deletions
@@ -6,6 +6,7 @@
 from collections import deque

 import numpy as np
+from numpy._typing import _Shape
 from numpy.typing import DTypeLike


@@ -110,7 +111,7 @@ def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
         return o

     @classmethod
-    def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike = False) -> Callable[[Any], Any]:
+    def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike | tuple[DTypeLike, Callable[[tuple[int, ...]], tuple[int, ...]]] = False) -> Callable[[Any], Any]:
         def wrapped_fn(*args, **kwargs):
             if kwargs is None:
                 kwargs = {}
@@ -130,9 +131,14 @@ def wrapped_fn(*args, **kwargs):
                 res = args[0]
                 assert isinstance(res, cls)
                 res = res._meta
-                # allow operations to override the dtype
+                # allow operations to override the dtype and shape
                 if meta_noop is not True:
-                    res = cls.meta_with_dtype(res, meta_noop)
+                    if isinstance(meta_noop, tuple):
+                        dtype, shape = meta_noop
+                        assert callable(shape)
+                        res = cls.meta_with_dtype_and_shape(dtype, shape(res.shape))
+                    else:
+                        res = cls.meta_with_dtype_and_shape(meta_noop, res.shape)

             if isinstance(res, cls._tensor_type):
                 def collect_replace(t: LazyBase):
@@ -183,12 +189,12 @@ def already_eager_to_eager(_t: LazyBase) -> Any:

     @classmethod
     def eager_to_meta(cls, t: Any) -> Any:
-        return cls.meta_with_dtype(t, t.dtype)
+        return cls.meta_with_dtype_and_shape(t.dtype, t.shape)

     # must be overridden, meta tensor init is backend-specific
     @classmethod
     @abstractmethod
-    def meta_with_dtype(cls, m: Any, dtype: Any) -> Any: pass
+    def meta_with_dtype_and_shape(cls, dtype: Any, shape: Any) -> Any: pass

     @classmethod
     def from_eager(cls, t: Any) -> Any:
@@ -205,15 +211,15 @@ class LazyNumpyTensor(LazyBase):
     _tensor_type = np.ndarray

     @classmethod
-    def meta_with_dtype(cls, m: np.ndarray[Any, Any], dtype: DTypeLike) -> np.ndarray[Any, Any]:
+    def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: _Shape) -> np.ndarray[Any, Any]:
         # The initial idea was to use np.nan as the fill value,
         # but non-float types like np.int16 can't use that.
         # So zero it is.
         cheat = np.zeros(1, dtype)
-        return np.lib.stride_tricks.as_strided(cheat, m.shape, (0 for _ in m.shape))
+        return np.lib.stride_tricks.as_strided(cheat, shape, (0 for _ in shape))

     def astype(self, dtype, *args, **kwargs):
-        meta = type(self).meta_with_dtype(self._meta, dtype)
+        meta = type(self).meta_with_dtype_and_shape(dtype, self._meta.shape)
         full_args = (self, dtype,) + args
         # very important to pass the shared _lazy deque, or else there's an infinite loop somewhere.
         return type(self)(meta=meta, args=full_args, lazy=self._lazy, func=(lambda a: a[0].astype(*a[1:], **kwargs)))
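
For a concrete picture of the lazy-side change, the sketch below shows what the renamed helper produces on the numpy side; it assumes the gguf-py package is importable, and the dtype and shape are arbitrary examples.

import numpy as np
import gguf

# A "meta" array has the requested dtype and shape but is backed by a single zero
# broadcast through zero strides, so no tensor-sized memory is allocated.
meta = gguf.LazyNumpyTensor.meta_with_dtype_and_shape(np.uint8, (4096, 4352))
assert meta.dtype == np.uint8
assert meta.shape == (4096, 4352)
assert meta.strides == (0, 0)

The new (dtype, shape_fn) tuple form of meta_noop in _wrap_fn feeds a shape-transforming callable into this helper, which is how the lazy Q8_0 path in quants.py (next file) can report the post-quantization shape without computing any real data.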

gguf-py/gguf/quants.py

Lines changed: 109 additions & 0 deletions
@@ -0,0 +1,109 @@
+from __future__ import annotations
+from typing import Callable
+
+from numpy.typing import DTypeLike
+
+from .constants import GGML_QUANT_SIZES, GGMLQuantizationType
+from .lazy import LazyNumpyTensor
+
+import numpy as np
+
+
+# same as ggml_compute_fp32_to_bf16 in ggml-impl.h
+def __compute_fp32_to_bf16(n: np.ndarray) -> np.ndarray:
+    n = n.astype(np.float32, copy=False).view(np.int32)
+    # force nan to quiet
+    n = np.where((n & 0x7fffffff) > 0x7f800000, (n & 0xffff0000) | (64 << 16), n)
+    # flush subnormals to zero
+    n = np.where((n & 0x7f800000) == 0, n & 0x80000000, n)
+    # round to nearest even
+    n = (n + (0x7fff + ((n >> 16) & 1))) >> 16
+    return n.astype(np.int16)
+
+
+# This is faster than np.vectorize and np.apply_along_axis because it works on more than one row at a time
+def __apply_over_grouped_rows(func: Callable[[np.ndarray], np.ndarray], arr: np.ndarray, otype: DTypeLike, oshape: tuple[int, ...]) -> np.ndarray:
+    rows = arr.reshape((-1, arr.shape[-1]))
+    osize = 1
+    for dim in oshape:
+        osize *= dim
+    out = np.empty(shape=osize, dtype=otype)
+    # compute over groups of 16 rows (arbitrary, but seems good for performance)
+    n_groups = rows.shape[0] // 16
+    np.concatenate([func(group).ravel() for group in np.array_split(rows, n_groups)], axis=0, out=out)
+    return out.reshape(oshape)
+
+
+def __quantize_bf16_array(n: np.ndarray) -> np.ndarray:
+    return __apply_over_grouped_rows(__compute_fp32_to_bf16, arr=n, otype=np.int16, oshape=n.shape)
+
+
+__quantize_bf16_lazy = LazyNumpyTensor._wrap_fn(__quantize_bf16_array, meta_noop=np.int16)
+
+
+def quantize_bf16(n: np.ndarray):
+    if type(n) is LazyNumpyTensor:
+        return __quantize_bf16_lazy(n)
+    else:
+        return __quantize_bf16_array(n)
+
+
+__q8_block_size, __q8_type_size = GGML_QUANT_SIZES[GGMLQuantizationType.Q8_0]
+
+
+def can_quantize_to_q8_0(n: np.ndarray) -> bool:
+    return n.shape[-1] % __q8_block_size == 0
+
+
+# round away from zero
+# ref: https://stackoverflow.com/a/59143326/22827863
+def np_roundf(n: np.ndarray) -> np.ndarray:
+    a = abs(n)
+    floored = np.floor(a)
+    b = floored + np.floor(2 * (a - floored))
+    return np.sign(n) * b
+
+
+def __quantize_q8_0_shape_change(s: tuple[int, ...]) -> tuple[int, ...]:
+    return (*s[:-1], s[-1] // __q8_block_size * __q8_type_size)
+
+
+# Implementation of Q8_0 with bit-exact same results as reference implementation in ggml-quants.c
+def __quantize_q8_0_rows(n: np.ndarray) -> np.ndarray:
+    shape = n.shape
+    assert shape[-1] % __q8_block_size == 0
+
+    n_blocks = n.size // __q8_block_size
+
+    blocks = n.reshape((n_blocks, __q8_block_size)).astype(np.float32, copy=False)
+
+    d = abs(blocks).max(axis=1, keepdims=True) / 127
+    with np.errstate(divide="ignore"):
+        id = np.where(d == 0, 0, 1 / d)
+    qs = np_roundf(blocks * id)
+
+    # (n_blocks, 2)
+    d = d.astype(np.float16).view(np.uint8)
+    # (n_blocks, block_size)
+    qs = qs.astype(np.int8).view(np.uint8)
+
+    assert d.shape[1] + qs.shape[1] == __q8_type_size
+
+    return np.concatenate([d, qs], axis=1).reshape(__quantize_q8_0_shape_change(shape))
+
+
+def __quantize_q8_0_array(n: np.ndarray) -> np.ndarray:
+    return __apply_over_grouped_rows(__quantize_q8_0_rows, arr=n, otype=np.uint8, oshape=__quantize_q8_0_shape_change(n.shape))
+
+
+__quantize_q8_0_lazy = LazyNumpyTensor._wrap_fn(
+    __quantize_q8_0_array,
+    meta_noop=(np.uint8, __quantize_q8_0_shape_change),
+)
+
+
+def quantize_q8_0(data: np.ndarray):
+    if type(data) is LazyNumpyTensor:
+        return __quantize_q8_0_lazy(data)
+    else:
+        return __quantize_q8_0_array(data)
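
A short usage sketch of the new public helpers; it assumes gguf-py is importable and that GGML_QUANT_SIZES[Q8_0] is (32, 34), i.e. 32 float elements packed into a 2-byte f16 scale plus 32 int8 values. The input shape is arbitrary.

import numpy as np
import gguf

rng = np.random.default_rng(0)
data = rng.standard_normal((64, 4096), dtype=np.float32)

bf16 = gguf.quantize_bf16(data)
assert bf16.dtype == np.int16 and bf16.shape == data.shape

assert gguf.can_quantize_to_q8_0(data)      # last dim must be a multiple of the block size
q8 = gguf.quantize_q8_0(data)
assert q8.dtype == np.uint8
assert q8.shape == (64, 4096 // 32 * 34)    # each 32-float block becomes 34 bytes

When the input is a LazyNumpyTensor instead of a plain array, quantize_q8_0 routes through the _wrap_fn-wrapped variant, so only the output dtype and shape are computed until the tensor is actually written.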
