Skip to content

Commit c67a583

Browse files
authored
Clean up model documentation (#6003)
* Remove old "minimum input size" from docstrings. * Remove "currently only XYZ weights available" * Fix description of wide_resnet101_2 * Display license URLs as links. * Clarify the order of dims of min_size. * Remove lengthy keypoint_names from meta-table.
1 parent ac01659 commit c67a583

File tree

9 files changed

+9
-20
lines changed

9 files changed

+9
-20
lines changed

docs/source/conf.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -347,15 +347,19 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
347347
metrics = meta.pop("metrics", {})
348348
meta_with_metrics = dict(meta, **metrics)
349349

350-
meta_with_metrics.pop("categories", None) # We don't want to document these, they can be too long
350+
# We don't want to document these, they can be too long
351+
for k in ["categories", "keypoint_names"]:
352+
meta_with_metrics.pop(k, None)
351353

352354
custom_docs = meta_with_metrics.pop("_docs", None) # Custom per-Weights docs
353355
if custom_docs is not None:
354356
lines += [custom_docs, ""]
355357

356358
for k, v in meta_with_metrics.items():
357-
if k == "recipe":
359+
if k in {"recipe", "license"}:
358360
v = f"`link <{v}>`__"
361+
elif k == "min_size":
362+
v = f"height={v[0]}, width={v[1]}"
359363
table.append((str(k), str(v)))
360364
table = tabulate(table, tablefmt="rst")
361365
lines += [".. rst-class:: table-weights"] # Custom CSS class, see custom_torchvision.css

docs/source/models/mnasnet.rst

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,7 @@ Search for Mobile <https://arxiv.org/pdf/1807.11626.pdf>`__ paper.
1111
Model builders
1212
--------------
1313

14-
The following model builders can be used to instantiate an MNASNet model. Currently
15-
only ``mnasnet0_5`` and ``mnasnet1_0`` can be instantiated with pre-trained weights.
14+
The following model builders can be used to instantiate an MNASNet model.
1615
All the model builders internally rely on the
1716
``torchvision.models.mnasnet.MNASNet`` base class. Please refer to the `source
1817
code

torchvision/models/alexnet.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,6 @@ class AlexNet_Weights(WeightsEnum):
7474
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
7575
"""AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
7676
77-
The required minimum input size of the model is 63x63.
78-
7977
.. note::
8078
AlexNet was originally introduced in the `ImageNet Classification with
8179
Deep Convolutional Neural Networks

torchvision/models/densenet.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -332,7 +332,6 @@ class DenseNet201_Weights(WeightsEnum):
332332
def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
333333
r"""Densenet-121 model from
334334
`Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
335-
The required minimum input size of the model is 29x29.
336335
337336
Args:
338337
weights (:class:`~torchvision.models.DenseNet121_Weights`, optional): The
@@ -358,7 +357,6 @@ def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool
358357
def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
359358
r"""Densenet-161 model from
360359
`Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
361-
The required minimum input size of the model is 29x29.
362360
363361
Args:
364362
weights (:class:`~torchvision.models.DenseNet161_Weights`, optional): The
@@ -384,7 +382,6 @@ def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool
384382
def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
385383
r"""Densenet-169 model from
386384
`Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
387-
The required minimum input size of the model is 29x29.
388385
389386
Args:
390387
weights (:class:`~torchvision.models.DenseNet169_Weights`, optional): The
@@ -410,7 +407,6 @@ def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool
410407
def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
411408
r"""Densenet-201 model from
412409
`Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
413-
The required minimum input size of the model is 29x29.
414410
415411
Args:
416412
weights (:class:`~torchvision.models.DenseNet201_Weights`, optional): The

torchvision/models/googlenet.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -298,8 +298,6 @@ def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = T
298298
"""GoogLeNet (Inception v1) model architecture from
299299
`Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`_.
300300
301-
The required minimum input size of the model is 15x15.
302-
303301
Args:
304302
weights (:class:`~torchvision.models.GoogLeNet_Weights`, optional): The
305303
pretrained weights for the model. See

torchvision/models/inception.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -430,7 +430,6 @@ def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bo
430430
"""
431431
Inception v3 model architecture from
432432
`Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`_.
433-
The required minimum input size of the model is 75x75.
434433
435434
.. note::
436435
**Important**: In contrast to the other models the inception_v3 expects tensors with a size of

torchvision/models/quantization/googlenet.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -147,8 +147,6 @@ def googlenet(
147147
weights. Quantized models only support inference and run on CPUs.
148148
GPU inference is not yet supported
149149
150-
The required minimum input size of the model is 15x15.
151-
152150
Args:
153151
weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
154152
pretrained weights for the model. See

torchvision/models/resnet.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -831,8 +831,8 @@ def wide_resnet101_2(
831831
832832
The model is the same as ResNet except for the bottleneck number of channels
833833
which is twice larger in every block. The number of channels in outer 1x1
834-
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
835-
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
834+
convolutions is the same, e.g. last block in ResNet-101 has 2048-512-2048
835+
channels, and in Wide ResNet-101-2 has 2048-1024-2048.
836836
837837
Args:
838838
weights (:class:`~torchvision.models.Wide_ResNet101_2_Weights`, optional): The

torchvision/models/squeezenet.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -162,8 +162,6 @@ def squeezenet1_0(
162162
accuracy with 50x fewer parameters and <0.5MB model size
163163
<https://arxiv.org/abs/1602.07360>`_ paper.
164164
165-
The required minimum input size of the model is 21x21.
166-
167165
Args:
168166
weights (:class:`~torchvision.models.SqueezeNet1_0_Weights`, optional): The
169167
pretrained weights to use. See
@@ -193,7 +191,6 @@ def squeezenet1_1(
193191
194192
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
195193
than SqueezeNet 1.0, without sacrificing accuracy.
196-
The required minimum input size of the model is 17x17.
197194
198195
Args:
199196
weights (:class:`~torchvision.models.SqueezeNet1_1_Weights`, optional): The

0 commit comments

Comments
 (0)