Skip to content

Commit 5bfca84

Browse files
committed
fix config
Signed-off-by: Kaihui-intel <[email protected]>
1 parent 26b10fa commit 5bfca84

File tree

1 file changed

+8
-9
lines changed

1 file changed

+8
-9
lines changed

neural_compressor/torch/quantization/load_entry.py

Lines changed: 8 additions & 9 deletions
Original file line number · Diff line number · Diff line change
@@ -37,20 +37,19 @@ def load(output_dir="./saved_results", model=None):
3737
qconfig_file_path = os.path.join(os.path.abspath(os.path.expanduser(output_dir)), "qconfig.json")
3838
with open(qconfig_file_path, "r") as f:
3939
per_op_qconfig = json.load(f)
40+
config_mapping = load_config_mapping(qconfig_file_path, ConfigRegistry.get_all_configs()["torch"])
41+
# select load function
42+
config_object = config_mapping[next(iter(config_mapping))]
4043

41-
if " " in per_op_qconfig.keys(): # ipex qconfig format: {' ': {'q_op_infos': {'0': {'op_type': ...
44+
if isinstance(config_object, (RTNConfig, GPTQConfig, AWQConfig, TEQConfig, AutoRoundConfig)): # WOQ
45+
from neural_compressor.torch.algorithms.weight_only.save_load import load
46+
47+
return load(output_dir)
48+
elif " " in per_op_qconfig.keys(): # ipex qconfig format: {' ': {'q_op_infos': {'0': {'op_type': ...
4249
from neural_compressor.torch.algorithms.static_quant import load
4350

4451
return load(output_dir)
4552
else: # FP8
46-
config_mapping = load_config_mapping(qconfig_file_path, ConfigRegistry.get_all_configs()["torch"])
47-
# select load function
48-
config_object = config_mapping[next(iter(config_mapping))]
49-
if isinstance(config_object, (RTNConfig, GPTQConfig, AWQConfig, TEQConfig, AutoRoundConfig)): # WOQ
50-
from neural_compressor.torch.algorithms.weight_only.save_load import load
51-
52-
return load(output_dir)
53-
5453
model.qconfig = config_mapping
5554
if isinstance(config_object, FP8Config):
5655
from neural_compressor.torch.algorithms.habana_fp8 import load

0 commit comments

Comments (0)