
Commit b0f9803

update calibration.py
Signed-off-by: yuwenzho <[email protected]>
1 parent 74a8362 commit b0f9803

File tree

1 file changed: +2 -2 lines changed


neural_compressor/adaptor/ox_utils/calibration.py

Lines changed: 2 additions & 2 deletions
@@ -548,7 +548,7 @@ def dump_minmax(self, q_config):
         # pipeline of getting calib ranges of tensors during calibration:
         # 1. augment_graph(): insert activation tensors to model output
         # 2. get_intermediate_outputs():
-        #    2.1 get_activation_tensors_calib_range(): get calib ranges of activation tensors using the augmnet graph
+        #    2.1 get_activation_tensors_calib_range(): get calib ranges of activation tensors using the augment graph
         #    2.2 get_weight_tensors_calib_range(): get calib ranges of weight tensors
         self.augment_graph()
         node_output_names, output_dicts = self.get_intermediate_outputs(q_config)
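
The comment block above outlines the calibration pipeline: augment the graph so activation tensors become model outputs, then run the calibration data and record min/max ranges for activation and weight tensors. A minimal, self-contained sketch of that idea (not the library's implementation; "model.onnx", the input name, and the random feed are placeholder assumptions) could look like this:

    import numpy as np
    import onnx
    import onnxruntime as ort

    # Load the model and run shape inference so intermediate tensors carry type info.
    model = onnx.shape_inference.infer_shapes(onnx.load("model.onnx"))  # placeholder path

    # "Augment" the graph: expose intermediate activations as extra graph outputs.
    existing = {o.name for o in model.graph.output}
    model.graph.output.extend(vi for vi in model.graph.value_info if vi.name not in existing)

    sess = ort.InferenceSession(model.SerializeToString())
    output_names = [o.name for o in sess.get_outputs()]

    # Collect per-tensor min/max over the calibration feeds (a single random feed here).
    calib_feeds = [{"input": np.random.rand(1, 3, 224, 224).astype(np.float32)}]  # placeholder
    calib_ranges = {}
    for feed in calib_feeds:
        for name, value in zip(output_names, sess.run(None, feed)):
            lo, hi = float(np.min(value)), float(np.max(value))
            prev = calib_ranges.get(name, (lo, hi))
            calib_ranges[name] = (min(prev[0], lo), max(prev[1], hi))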
@@ -631,7 +631,7 @@ def dump_tensor(self, activation=True, weight=False, format=None):
         self.dynamically_quantized = "DynamicQuantizeLinear" in [node.op_type for node in self.model.graph.node]
         is_qdq = format == "qdq"
         if activation:
-            self.augment_graph(inspect_tensor=True)  # add activation tensors to model output
+            self.augment_graph()  # add activation tensors to model output
             _, output_dicts = self.get_intermediate_outputs(activation_only=not weight, weight_only=not activation)
             iters = len(list(output_dicts.values())[-1])
             map_node_activation = [{} for _ in range(iters)]
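
For context on the surrounding lines: output_dicts maps each dumped tensor name to a list of recorded values, one entry per calibration iteration, which is why iters can be read off any of its values and map_node_activation is initialized as one dict per iteration. A toy sketch of that per-iteration regrouping (illustrative data and names only, not the library's code):

    # Toy stand-in for output_dicts: {tensor name: [value per calibration iteration]}.
    output_dicts = {"conv1_out": [0.1, 0.2, 0.3], "relu1_out": [0.5, 0.6, 0.7]}

    iters = len(list(output_dicts.values())[-1])
    per_iter_maps = [{} for _ in range(iters)]  # analogous role to map_node_activation
    for name, values in output_dicts.items():
        for i, value in enumerate(values):
            per_iter_maps[i][name] = value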
