From 376cfbd88551415e2e3140029510c3be6e173995 Mon Sep 17 00:00:00 2001
From: Nicolas Hug
Date: Fri, 10 Jun 2022 11:17:35 +0100
Subject: [PATCH] Fix ViT and Resnext docs

---
 docs/source/models/resnext.rst           |  1 +
 torchvision/models/vision_transformer.py | 30 ++++++++++++------------
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/docs/source/models/resnext.rst b/docs/source/models/resnext.rst
index 7dd6126c8c7..5d8325d9b4b 100644
--- a/docs/source/models/resnext.rst
+++ b/docs/source/models/resnext.rst
@@ -23,3 +23,4 @@ more details about this class.
 
     resnext50_32x4d
     resnext101_32x8d
+    resnext101_64x4d
diff --git a/torchvision/models/vision_transformer.py b/torchvision/models/vision_transformer.py
index e67c2a67acd..57c1479b13d 100644
--- a/torchvision/models/vision_transformer.py
+++ b/torchvision/models/vision_transformer.py
@@ -603,8 +603,8 @@ def vit_b_16(*, weights: Optional[ViT_B_16_Weights] = None, progress: bool = Tru
     `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale `_.
 
     Args:
-        weights (:class:`~torchvision.models.vision_transformer.ViT_B_16_Weights`, optional): The pretrained
-            weights to use. See :class:`~torchvision.models.vision_transformer.ViT_B_16_Weights`
+        weights (:class:`~torchvision.models.ViT_B_16_Weights`, optional): The pretrained
+            weights to use. See :class:`~torchvision.models.ViT_B_16_Weights`
             below for more details and possible values. By default, no pre-trained weights are used.
         progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
         **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
@@ -612,7 +612,7 @@ def vit_b_16(*, weights: Optional[ViT_B_16_Weights] = None, progress: bool = Tru
             `_
             for more details about this class.
 
-    .. autoclass:: torchvision.models.vision_transformer.ViT_B_16_Weights
+    .. autoclass:: torchvision.models.ViT_B_16_Weights
         :members:
     """
     weights = ViT_B_16_Weights.verify(weights)
@@ -636,8 +636,8 @@ def vit_b_32(*, weights: Optional[ViT_B_32_Weights] = None, progress: bool = Tru
     `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale `_.
 
     Args:
-        weights (:class:`~torchvision.models.vision_transformer.ViT_B_32_Weights`, optional): The pretrained
-            weights to use. See :class:`~torchvision.models.vision_transformer.ViT_B_32_Weights`
+        weights (:class:`~torchvision.models.ViT_B_32_Weights`, optional): The pretrained
+            weights to use. See :class:`~torchvision.models.ViT_B_32_Weights`
             below for more details and possible values. By default, no pre-trained weights are used.
         progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
         **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
@@ -645,7 +645,7 @@ def vit_b_32(*, weights: Optional[ViT_B_32_Weights] = None, progress: bool = Tru
             `_
             for more details about this class.
 
-    .. autoclass:: torchvision.models.vision_transformer.ViT_B_32_Weights
+    .. autoclass:: torchvision.models.ViT_B_32_Weights
         :members:
     """
     weights = ViT_B_32_Weights.verify(weights)
@@ -669,8 +669,8 @@ def vit_l_16(*, weights: Optional[ViT_L_16_Weights] = None, progress: bool = Tru
     `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale `_.
 
     Args:
-        weights (:class:`~torchvision.models.vision_transformer.ViT_L_16_Weights`, optional): The pretrained
-            weights to use. See :class:`~torchvision.models.vision_transformer.ViT_L_16_Weights`
+        weights (:class:`~torchvision.models.ViT_L_16_Weights`, optional): The pretrained
+            weights to use. See :class:`~torchvision.models.ViT_L_16_Weights`
             below for more details and possible values. By default, no pre-trained weights are used.
         progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
         **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
@@ -678,7 +678,7 @@ def vit_l_16(*, weights: Optional[ViT_L_16_Weights] = None, progress: bool = Tru
             `_
             for more details about this class.
 
-    .. autoclass:: torchvision.models.vision_transformer.ViT_L_16_Weights
+    .. autoclass:: torchvision.models.ViT_L_16_Weights
         :members:
     """
     weights = ViT_L_16_Weights.verify(weights)
@@ -702,8 +702,8 @@ def vit_l_32(*, weights: Optional[ViT_L_32_Weights] = None, progress: bool = Tru
     `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale `_.
 
     Args:
-        weights (:class:`~torchvision.models.vision_transformer.ViT_L_32_Weights`, optional): The pretrained
-            weights to use. See :class:`~torchvision.models.vision_transformer.ViT_L_32_Weights`
+        weights (:class:`~torchvision.models.ViT_L_32_Weights`, optional): The pretrained
+            weights to use. See :class:`~torchvision.models.ViT_L_32_Weights`
             below for more details and possible values. By default, no pre-trained weights are used.
         progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
         **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
@@ -711,7 +711,7 @@ def vit_l_32(*, weights: Optional[ViT_L_32_Weights] = None, progress: bool = Tru
             `_
             for more details about this class.
 
-    .. autoclass:: torchvision.models.vision_transformer.ViT_L_32_Weights
+    .. autoclass:: torchvision.models.ViT_L_32_Weights
         :members:
     """
     weights = ViT_L_32_Weights.verify(weights)
@@ -734,8 +734,8 @@ def vit_h_14(*, weights: Optional[ViT_H_14_Weights] = None, progress: bool = Tru
     `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale `_.
 
     Args:
-        weights (:class:`~torchvision.models.vision_transformer.ViT_H_14_Weights`, optional): The pretrained
-            weights to use. See :class:`~torchvision.models.vision_transformer.ViT_H_14_Weights`
+        weights (:class:`~torchvision.models.ViT_H_14_Weights`, optional): The pretrained
+            weights to use. See :class:`~torchvision.models.ViT_H_14_Weights`
             below for more details and possible values. By default, no pre-trained weights are used.
         progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
         **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
@@ -743,7 +743,7 @@ def vit_h_14(*, weights: Optional[ViT_H_14_Weights] = None, progress: bool = Tru
             `_
             for more details about this class.
 
-    .. autoclass:: torchvision.models.vision_transformer.ViT_H_14_Weights
+    .. autoclass:: torchvision.models.ViT_H_14_Weights
         :members:
     """
     weights = ViT_H_14_Weights.verify(weights)
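
For reference, a minimal sketch of how the weights enums documented above are used, assuming torchvision >= 0.13; the DEFAULT alias, the transforms() preset, the meta dictionary, and string weight names are features of the torchvision.models weights API, and the random input tensor is only a stand-in for a real image:

    import torch
    from torchvision.models import ViT_B_16_Weights, resnext101_64x4d, vit_b_16

    # Pick a pretrained checkpoint; DEFAULT resolves to the best available weights.
    weights = ViT_B_16_Weights.DEFAULT
    vit = vit_b_16(weights=weights).eval()
    resnext = resnext101_64x4d(weights="DEFAULT").eval()  # string names are also accepted

    # Each weights entry bundles its own preprocessing transforms and metadata.
    preprocess = weights.transforms()
    batch = preprocess(torch.rand(3, 256, 256)).unsqueeze(0)  # dummy 3-channel image

    with torch.no_grad():
        logits = vit(batch)

    top_class = weights.meta["categories"][logits.argmax().item()]
    print(logits.shape, top_class)  # torch.Size([1, 1000]) and an ImageNet label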