File tree: torchvision/prototype/models/quantization — 2 files changed, +41 −6 lines changed.

@@ -56,22 +56,37 @@ def _mobilenet_v3_model(
56
56
return model
57
57
58
58
59
+_COMMON_META = {
+    "size": (224, 224),
+    "categories": _IMAGENET_CATEGORIES,
+    "interpolation": InterpolationMode.BILINEAR,
+    "backend": "qnnpack",
+    "quantization": "qat",
+    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
+}
+
+
59
69
 class QuantizedMobileNetV3LargeWeights(Weights):
     ImageNet1K_QNNPACK_RefV1 = WeightEntry(
         url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
-            "size": (224, 224),
-            "categories": _IMAGENET_CATEGORIES,
-            "interpolation": InterpolationMode.BILINEAR,
-            "backend": "qnnpack",
-            "quantization": "qat",
-            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
+            **_COMMON_META,
             "unquantized": MobileNetV3LargeWeights.ImageNet1K_RefV1,
             "acc@1": 73.004,
             "acc@5": 90.858,
         },
     )
80
+    ImageNet1K_QNNPACK_RefV2 = WeightEntry(
+        url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-a5ce659d.pth",
+        transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
+        meta={
+            **_COMMON_META,
+            "unquantized": MobileNetV3LargeWeights.ImageNet1K_RefV2,
+            "acc@1": 74.140,
+            "acc@5": 91.982,
+        },
+    )
75
90
76
91
77
92
def mobilenet_v3_large (
Original file line number Diff line number Diff line change @@ -87,6 +87,16 @@ class QuantizedResNet50Weights(Weights):
87
87
"acc@5" : 92.814 ,
88
88
},
89
89
)
90
+    ImageNet1K_FBGEMM_RefV2 = WeightEntry(
+        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
+        transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
+        meta={
+            **_COMMON_META,
+            "unquantized": ResNet50Weights.ImageNet1K_RefV2,
+            "acc@1": 80.282,
+            "acc@5": 94.976,
+        },
+    )
90
100
91
101
92
102
class QuantizedResNeXt101_32x8dWeights (Weights ):
@@ -100,6 +110,16 @@ class QuantizedResNeXt101_32x8dWeights(Weights):
100
110
"acc@5" : 94.480 ,
101
111
},
102
112
)
113
+    ImageNet1K_FBGEMM_RefV2 = WeightEntry(
+        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
+        transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
+        meta={
+            **_COMMON_META,
+            "unquantized": ResNeXt101_32x8dWeights.ImageNet1K_RefV2,
+            "acc@1": 82.574,
+            "acc@5": 96.132,
+        },
+    )
103
123
104
124
105
125
def resnet18 (
You can’t perform that action at this time.
0 commit comments