torchvision/models/quantization
1 file changed: +1 −3

@@ -21,7 +21,7 @@ class QuantizableSqueezeExcitation(SElayer):
     _version = 2
 
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        kwargs["scale_activation"] = nn.Hardswish
+        kwargs["scale_activation"] = nn.Hardsigmoid
         super().__init__(*args, **kwargs)
         self.skip_mul = nn.quantized.FloatFunctional()
 
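Why the one-line change matters: a squeeze-excitation block multiplies its input by a per-channel scale, so the scale activation must map into [0, 1]. nn.Hardsigmoid does; nn.Hardswish (the old value) is unbounded above and goes negative for inputs in (-3, 0). A minimal sketch of the corrected behaviour, assuming SElayer is the public torchvision.ops.SqueezeExcitation and using illustrative channel sizes:

import torch
from torch import nn
from torchvision.ops import SqueezeExcitation

# Illustrative sizes only: 8 input channels squeezed to 2.
se = SqueezeExcitation(input_channels=8, squeeze_channels=2, scale_activation=nn.Hardsigmoid)

x = torch.randn(1, 8, 4, 4)
out = se(x)

# With Hardsigmoid the per-channel scale is clamped to [0, 1], so the block
# only attenuates its input; with Hardswish the "scale" could exceed 1 or
# go negative, which is the bug this diff fixes.
assert out.shape == x.shape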
@@ -49,8 +49,6 @@ def _load_from_state_dict(
                 "scale_activation.activation_post_process.zero_point": torch.tensor([0], dtype=torch.int32),
                 "scale_activation.activation_post_process.fake_quant_enabled": torch.tensor([1]),
                 "scale_activation.activation_post_process.observer_enabled": torch.tensor([1]),
-                "scale_activation.activation_post_process.activation_post_process.min_val": torch.tensor(float('inf')),
-                "scale_activation.activation_post_process.activation_post_process.max_val": torch.tensor(-float('inf')),
             }
             for k, v in default_state_dict.items():
                 full_key = prefix + k
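The second hunk stops seeding the observer's min_val/max_val buffers in the version-1 compatibility defaults (the ±inf values it used are the observer's own "unset" state, so they do not need to be injected). For context, a rough sketch of the version-gated _load_from_state_dict migration pattern the surrounding code uses; the class and buffer names here are hypothetical:

import torch
from torch import nn

class VersionedModule(nn.Module):
    """Hypothetical module illustrating the version-gated default-key pattern."""

    _version = 2  # nn.Module records this in the checkpoint metadata

    def __init__(self) -> None:
        super().__init__()
        self.register_buffer("some_new_buffer", torch.tensor([0.0]))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # Seed keys that version-1 checkpoints lack so strict loading
            # succeeds; the patch above deliberately stops seeding the
            # observer min_val/max_val stats this way.
            defaults = {"some_new_buffer": torch.tensor([0.0])}
            for k, v in defaults.items():
                full_key = prefix + k
                if full_key not in state_dict:
                    state_dict[full_key] = v
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict,
                                      missing_keys, unexpected_keys, error_msgs)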