diff --git a/docs/source/conf.py b/docs/source/conf.py
index 940098bfed5..258bbf6b5f2 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -347,15 +347,19 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
             metrics = meta.pop("metrics", {})
             meta_with_metrics = dict(meta, **metrics)
 
-            meta_with_metrics.pop("categories", None)  # We don't want to document these, they can be too long
+            # We don't want to document these, they can be too long
+            for k in ["categories", "keypoint_names"]:
+                meta_with_metrics.pop(k, None)
 
             custom_docs = meta_with_metrics.pop("_docs", None)  # Custom per-Weights docs
             if custom_docs is not None:
                 lines += [custom_docs, ""]
 
             for k, v in meta_with_metrics.items():
-                if k == "recipe":
+                if k in {"recipe", "license"}:
                     v = f"`link <{v}>`__"
+                elif k == "min_size":
+                    v = f"height={v[0]}, width={v[1]}"
                 table.append((str(k), str(v)))
             table = tabulate(table, tablefmt="rst")
             lines += [".. rst-class:: table-weights"]  # Custom CSS class, see custom_torchvision.css
diff --git a/docs/source/models/mnasnet.rst b/docs/source/models/mnasnet.rst
index f902bc2ef38..e31b4aca1b6 100644
--- a/docs/source/models/mnasnet.rst
+++ b/docs/source/models/mnasnet.rst
@@ -11,8 +11,7 @@ Search for Mobile `__ paper.
 
 Model builders
 --------------
 
-The following model builders can be used to instanciate an MNASNet model. Currently
-only ``mnasnet0_5`` and ``mnasnet1_0`` can be instantiated with pre-trained weights.
+The following model builders can be used to instantiate an MNASNet model.
 
 All the model builders internally rely on the ``torchvision.models.mnasnet.MNASNet`` base class. Please refer to the `source code
diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py
index 6c9ae4932d7..dff0bbad1a4 100644
--- a/torchvision/models/alexnet.py
+++ b/torchvision/models/alexnet.py
@@ -74,8 +74,6 @@ class AlexNet_Weights(WeightsEnum):
 def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
     """AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks `__.
 
-    The required minimum input size of the model is 63x63.
-
     .. note::
         AlexNet was originally introduced in the `ImageNet Classification with Deep
         Convolutional Neural Networks
diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py
index bf46b113691..47447991f0e 100644
--- a/torchvision/models/densenet.py
+++ b/torchvision/models/densenet.py
@@ -332,7 +332,6 @@ class DenseNet201_Weights(WeightsEnum):
 def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-121 model from
     `Densely Connected Convolutional Networks `_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet121_Weights`, optional): The
@@ -358,7 +357,6 @@ def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool
 def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-161 model from
     `Densely Connected Convolutional Networks `_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet161_Weights`, optional): The
@@ -384,7 +382,6 @@ def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool
 def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-169 model from
     `Densely Connected Convolutional Networks `_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet169_Weights`, optional): The
@@ -410,7 +407,6 @@ def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool
 def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
     r"""Densenet-201 model from
     `Densely Connected Convolutional Networks `_.
-    The required minimum input size of the model is 29x29.
 
     Args:
         weights (:class:`~torchvision.models.DenseNet201_Weights`, optional): The
diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py
index 37c20d72cdd..755740abc11 100644
--- a/torchvision/models/googlenet.py
+++ b/torchvision/models/googlenet.py
@@ -298,8 +298,6 @@ def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = T
     """GoogLeNet (Inception v1) model architecture from
     `Going Deeper with Convolutions `_.
 
-    The required minimum input size of the model is 15x15.
-
     Args:
         weights (:class:`~torchvision.models.GoogLeNet_Weights`, optional): The
             pretrained weights for the model. See
diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py
index 5b89a1e5c9a..0abd195742c 100644
--- a/torchvision/models/inception.py
+++ b/torchvision/models/inception.py
@@ -430,7 +430,6 @@ def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bo
     """
     Inception v3 model architecture from
     `Rethinking the Inception Architecture for Computer Vision `_.
-    The required minimum input size of the model is 75x75.
 
     .. note::
         **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py
index 36ec453a38f..892b3408706 100644
--- a/torchvision/models/quantization/googlenet.py
+++ b/torchvision/models/quantization/googlenet.py
@@ -147,8 +147,6 @@ def googlenet(
         weights. Quantized models only support inference and run on CPUs.
         GPU inference is not yet supported
 
-    The required minimum input size of the model is 15x15.
-
     Args:
         weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
             pretrained weights for the model. See
diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py
index f2910cdc281..e8ed33d5080 100644
--- a/torchvision/models/resnet.py
+++ b/torchvision/models/resnet.py
@@ -831,8 +831,8 @@ def wide_resnet101_2(
 
     The model is the same as ResNet except for the bottleneck number of channels
     which is twice larger in every block. The number of channels in outer 1x1
-    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
-    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
+    convolutions is the same, e.g. last block in ResNet-101 has 2048-512-2048
+    channels, and in Wide ResNet-101-2 has 2048-1024-2048.
 
     Args:
         weights (:class:`~torchvision.models.Wide_ResNet101_2_Weights`, optional): The
diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py
index afe9c18887f..dd474cbe7f7 100644
--- a/torchvision/models/squeezenet.py
+++ b/torchvision/models/squeezenet.py
@@ -162,8 +162,6 @@ def squeezenet1_0(
     accuracy with 50x fewer parameters and <0.5MB model size
     `_ paper.
 
-    The required minimum input size of the model is 21x21.
-
     Args:
         weights (:class:`~torchvision.models.SqueezeNet1_0_Weights`, optional): The
             pretrained weights to use. See
@@ -193,7 +191,6 @@ def squeezenet1_1(
 
     SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
     than SqueezeNet 1.0, without sacrificing accuracy.
-    The required minimum input size of the model is 17x17.
 
     Args:
         weights (:class:`~torchvision.models.SqueezeNet1_1_Weights`, optional): The
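
For illustration, here is a minimal standalone sketch of the table-formatting logic that the conf.py hunk above introduces: skip fields that are too long to document, render ``recipe``/``license`` values as RST links, and expand the ``min_size`` tuple into height/width. The sample ``meta_with_metrics`` dict is invented for this example (real values come from each weights enum's ``meta`` attribute), and it only assumes the third-party ``tabulate`` package, which conf.py already uses.

# Standalone sketch of the metadata formatting added in docs/source/conf.py above.
# The dict below is invented for illustration; it is not taken from any weights enum.
from tabulate import tabulate

meta_with_metrics = {
    "min_size": (29, 29),
    "num_params": 7978856,
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification",
    "categories": ["tench", "goldfish", "great white shark"],
    "keypoint_names": ["nose", "left_eye"],
}

# We don't want to document these, they can be too long
for k in ["categories", "keypoint_names"]:
    meta_with_metrics.pop(k, None)

table = []
for k, v in meta_with_metrics.items():
    if k in {"recipe", "license"}:
        v = f"`link <{v}>`__"  # URLs are rendered as RST hyperlinks
    elif k == "min_size":
        v = f"height={v[0]}, width={v[1]}"  # the (H, W) tuple is rendered as text
    table.append((str(k), str(v)))

print(tabulate(table, tablefmt="rst"))

Running this prints a small RST table, similar in shape to the per-weights tables the documentation pages render via the ``table-weights`` CSS class mentioned in the hunk.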