@@ -101,18 +101,30 @@ def mobilenet_v2(
101
101
) -> QuantizableMobileNetV2 :
102
102
"""
103
103
Constructs a MobileNetV2 architecture from
104
- `" MobileNetV2: Inverted Residuals and Linear Bottlenecks"
104
+ `MobileNetV2: Inverted Residuals and Linear Bottlenecks
105
105
<https://arxiv.org/abs/1801.04381>`_.
106
106
107
107
Note that quantize = True returns a quantized model with 8 bit
108
108
weights. Quantized models only support inference and run on CPUs.
109
109
GPU inference is not yet supported.
110
110
111
111
Args:
112
- weights (GoogLeNet_QuantizedWeights or GoogLeNet_Weights, optional): The pretrained
113
- weights for the model
114
- progress (bool): If True, displays a progress bar of the download to stderr
115
- quantize(bool): If True, returns a quantized model, else returns a float model
112
+ weights (:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
113
+ pretrained weights for the model. See
114
+ :class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` below for
115
+ more details, and possible values. By default, no pre-trained
116
+ weights are used.
117
+ progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
118
+ quantize (bool, optional): If True, returns a quantized version of the model. Default is False.
119
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV2``
120
+ base class. Please refer to the `source code
121
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
122
+ for more details about this class.
123
+ .. autoclass:: torchvision.models.quantization.MobileNet_V2_QuantizedWeights
124
+ :members:
125
+ .. autoclass:: torchvision.models.MobileNet_V2_Weights
126
+ :members:
127
+ :noindex:
116
128
"""
117
129
weights = (MobileNet_V2_QuantizedWeights if quantize else MobileNet_V2_Weights ).verify (weights )
118
130
0 commit comments