File tree Expand file tree Collapse file tree 1 file changed +4
-0
lines changed
torchvision/models/quantization Expand file tree Collapse file tree 1 file changed +4
-0
lines changed Original file line number Diff line number Diff line change @@ -140,6 +140,10 @@ def _mobilenet_v3_model(
140
140
_replace_relu(model)
141
141
142
142
if quantize :
143
+ # Instead of quantizing the model and then loading the quantized weights we take a different approach.
144
+ # We prepare the QAT model, load the QAT weights from training and then convert it.
145
+ # This is done to avoid extremely low accuracies observed on the specific model. This is rather a workaround
146
+ # for an unresolved bug on the eager quantization API detailed at: https://github.com/pytorch/vision/issues/5890
143
147
model.fuse_model(is_qat=True)
144
148
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(backend)
145
149
torch.ao.quantization.prepare_qat(model, inplace=True)
You can’t perform that action at this time.
0 commit comments