gptqmodel/models/loader.py (15 changes: 5 additions & 10 deletions)
@@ -493,16 +493,11 @@ def skip(*args, **kwargs):
f"Format: Loading of a sym=False model with format={FORMAT.GPTQ} is only supported if produced by gptqmodel version >= {MIN_VERSION_WITH_V2}"
)

-# skip v1 to v2 conversion for kernels that can only operate on sym=True (gptq_v1)
-if preload_qlinear_kernel not in [IPEXQuantLinear, MarlinQuantLinear, ExllamaEoraQuantLinear]:
-    t = time.time()
-    logger.info(f"Format: Converting `{FORMAT_FIELD_JSON}` from `{FORMAT.GPTQ}` to internal `{FORMAT.GPTQ_V2}`.")
-    model = convert_gptq_v1_to_v2_format(
-        model,
-        cfg=qcfg,
-        qlinear_kernel=preload_qlinear_kernel,
-    )
-    logger.info(f"Format: Conversion complete: {time.time() - t}s")
+model = convert_gptq_v1_to_v2_format(
+    model,
+    cfg=qcfg,
+    qlinear_kernel=preload_qlinear_kernel,
+)

load_checkpoint_in_model = False
qcfg.runtime_format = FORMAT.GPTQ_V2
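The practical effect of this hunk is that the loader no longer repeats the kernel check: it always calls the converter, and the converter itself returns early for kernels that can only operate on the serialized sym=True (gptq_v1) layout. A minimal, self-contained sketch of that guard; the class names below are local stubs standing in for IPEXQuantLinear, MarlinQuantLinear and ExllamaEoraQuantLinear, and the constant and function names are illustrative, not GPTQModel's API:

# Stub classes for illustration only; the real kernels live in
# gptqmodel.nn_modules.qlinear and the constant below is hypothetical.
class IPEXQuantLinear: ...
class MarlinQuantLinear: ...
class ExllamaEoraQuantLinear: ...
class TritonV2QuantLinear: ...

# Kernels that consume the serialized gptq (v1) layout directly and therefore
# skip the v1 <-> v2 round trip.
GPTQ_V1_NATIVE_KERNELS = (IPEXQuantLinear, MarlinQuantLinear, ExllamaEoraQuantLinear)

def convert_v1_to_v2_sketch(model: dict, qlinear_kernel: type) -> dict:
    if qlinear_kernel in GPTQ_V1_NATIVE_KERNELS:
        return model                            # no-op for v1-native kernels
    return {**model, "format": "gptq_v2"}       # stand-in for the real tensor rewrite

model = {"format": "gptq_v1"}
assert convert_v1_to_v2_sketch(model, MarlinQuantLinear)["format"] == "gptq_v1"
assert convert_v1_to_v2_sketch(model, TritonV2QuantLinear)["format"] == "gptq_v2"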
gptqmodel/utils/model.py (19 changes: 16 additions & 3 deletions)
@@ -33,6 +33,8 @@
import torch
import torch.nn as nn
import transformers
+from gptqmodel.nn_modules.qlinear.exllama_eora import ExllamaEoraQuantLinear
+from gptqmodel.nn_modules.qlinear.marlin import MarlinQuantLinear
from huggingface_hub import HfApi, hf_hub_download
from packaging import version
from transformers import AutoConfig, PretrainedConfig
@@ -353,13 +355,22 @@ def hf_convert_gptq_v1_to_v2_format(
else:
return model, False

+# Optionally convert weights from gptq_v1 to v2 format if the kernel is compatible with v2
def convert_gptq_v1_to_v2_format(
    model,
    cfg: QuantizeConfig,
    qlinear_kernel: Type[BaseQuantLinear],
):
+    # skip v1 to v2 conversion for kernels that can only operate on sym=True (gptq_v1)
+    if qlinear_kernel in [IPEXQuantLinear, MarlinQuantLinear, ExllamaEoraQuantLinear]:
+        return model
+
    # Limit thread usage to avoid auto-parallelization regression
    with tctl.threadpool_limits(limits=1):
+        t = time.time()
+        logger.info(
+            f"Format: Converting `{FORMAT_FIELD_JSON}` from `{FORMAT.GPTQ}` to internal `{FORMAT.GPTQ_V2}`.")
+
        for _, submodule in model.named_modules():
            # v1 checkpoint format used to do `qzeros -= 1` before serialization, thus the
            # additions here do not overflow.
@@ -438,6 +449,8 @@ convert_gptq_v1_to_v2_format(
else:
raise NotImplementedError("Only 2,3,4,8 bits are supported.")

logger.info(f"Format: Conversion complete: {time.time() - t}s")

return model
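The `qzeros` comment inside this function is the key to why the conversion is a cheap in-place addition: v1 checkpoints serialize each packed zero-point field with a -1 offset, so v1 to v2 only has to add 1 back inside every packed field and the additions cannot overflow. A minimal sketch of that adjustment for the 4-bit case, illustrative only and not the library's exact code:

import torch

# Illustrative only: for 4-bit quantization eight zero-points are packed into
# one int32, so adding 0x11111111 increments every 4-bit field by 1. Because
# v1 stored each field already decremented by 1, the addition cannot carry
# into a neighboring field.
def qzeros_v1_to_v2_4bit(qzeros: torch.Tensor) -> torch.Tensor:
    assert qzeros.dtype == torch.int32
    return qzeros + 0x11111111

packed_v1 = torch.tensor([[0x32323232]], dtype=torch.int32)   # fields of 3 and 2
print(hex(qzeros_v1_to_v2_4bit(packed_v1).item()))            # 0x43434343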


@@ -457,14 +470,14 @@ hf_convert_gptq_v2_to_v1_format(
else:
return model, False


# Optionally convert weights from gptq_v2 to v1 export format if the kernel is compatible with v2
def convert_gptq_v2_to_v1_format(
    model,
    quantize_config: QuantizeConfig,
    qlinear_kernel: Type[BaseQuantLinear],
):
-    # skip v2 to v1 conversion for ipex
-    if qlinear_kernel == IPEXQuantLinear:
+    # skip v2 to v1 conversion for gptq_v1 kernels
+    if qlinear_kernel in [IPEXQuantLinear, MarlinQuantLinear, ExllamaEoraQuantLinear]:
        return model

    # Limit thread usage to avoid auto-parallelization regression
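Both converters wrap their per-module loop in `tctl.threadpool_limits(limits=1)`; per the comment, this avoids a regression from automatic intra-op parallelization when touching many small tensors. A minimal usage sketch of that pattern, under the assumption that `tctl` is an alias for the threadpoolctl package:

import numpy as np
from threadpoolctl import threadpool_limits  # assumed to be the `tctl` alias used above

a = np.random.rand(256, 256)

# Cap BLAS/OpenMP pools at a single thread for a burst of small operations,
# where thread fan-out costs more than it saves; the previous limits are
# restored when the context manager exits.
with threadpool_limits(limits=1):
    for _ in range(10):
        _ = a @ a.T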