Skip to content

Commit 7ad2936

Browse files
committed
Adding new Quantized models.
1 parent 8dcb5b8 commit 7ad2936

File tree

2 files changed

+41
-6
lines changed

2 files changed

+41
-6
lines changed

torchvision/prototype/models/quantization/mobilenetv3.py

Lines changed: 21 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -56,22 +56,37 @@ def _mobilenet_v3_model(
5656
return model
5757

5858

59+
# Metadata shared by every quantized-MobileNetV3 weight entry below.
# Each WeightEntry spreads this dict into its `meta` and then adds the
# entry-specific keys ("unquantized", "acc@1", "acc@5").
_COMMON_META = {
    "size": (224, 224),  # spatial input size the weights expect
    "categories": _IMAGENET_CATEGORIES,  # ImageNet-1K class labels
    "interpolation": InterpolationMode.BILINEAR,
    "backend": "qnnpack",  # quantized inference backend
    "quantization": "qat",  # presumably quantization-aware training, per the recipe link — confirm
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
}
5969
class QuantizedMobileNetV3Large(Weights):
    pass
class QuantizedMobileNetV3LargeWeights(Weights):
    # Quantized MobileNetV3-Large checkpoints (QNNPACK backend).
    # Both entries share _COMMON_META; they differ in eval transforms,
    # the floating-point source weights, and the resulting accuracy.
    ImageNet1K_QNNPACK_RefV1 = WeightEntry(
        url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
        transforms=partial(ImageNetEval, crop_size=224),
        meta={
            **_COMMON_META,
            # The floating-point weights this checkpoint corresponds to.
            "unquantized": MobileNetV3LargeWeights.ImageNet1K_RefV1,
            "acc@1": 73.004,
            "acc@5": 90.858,
        },
    )
    # Newer entry: same crop, but evaluated with resize_size=232 and
    # derived from the RefV2 floating-point weights — higher accuracy.
    ImageNet1K_QNNPACK_RefV2 = WeightEntry(
        url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-a5ce659d.pth",
        transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "unquantized": MobileNetV3LargeWeights.ImageNet1K_RefV2,
            "acc@1": 74.140,
            "acc@5": 91.982,
        },
    )
7590

7691

7792
def mobilenet_v3_large(

torchvision/prototype/models/quantization/resnet.py

Lines changed: 20 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -87,6 +87,16 @@ class QuantizedResNet50Weights(Weights):
8787
"acc@5": 92.814,
8888
},
8989
)
90+
ImageNet1K_FBGEMM_RefV2 = WeightEntry(
91+
url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
92+
transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
93+
meta={
94+
**_COMMON_META,
95+
"unquantized": ResNet50Weights.ImageNet1K_RefV2,
96+
"acc@1": 80.282,
97+
"acc@5": 94.976,
98+
},
99+
)
90100

91101

92102
class QuantizedResNeXt101_32x8dWeights(Weights):
@@ -100,6 +110,16 @@ class QuantizedResNeXt101_32x8dWeights(Weights):
100110
"acc@5": 94.480,
101111
},
102112
)
113+
ImageNet1K_FBGEMM_RefV2 = WeightEntry(
114+
url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
115+
transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
116+
meta={
117+
**_COMMON_META,
118+
"unquantized": ResNeXt101_32x8dWeights.ImageNet1K_RefV2,
119+
"acc@1": 82.574,
120+
"acc@5": 96.132,
121+
},
122+
)
103123

104124

105125
def resnet18(

0 commit comments

Comments (0)