@@ -603,16 +603,16 @@ def vit_b_16(*, weights: Optional[ViT_B_16_Weights] = None, progress: bool = Tru
603
603
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
604
604
605
605
Args:
606
- weights (:class:`~torchvision.models.vision_transformer.ViT_B_16_Weights`, optional): The pretrained
607
- weights to use. See :class:`~torchvision.models.vision_transformer.ViT_B_16_Weights`
606
+ weights (:class:`~torchvision.models.ViT_B_16_Weights`, optional): The pretrained
607
+ weights to use. See :class:`~torchvision.models.ViT_B_16_Weights`
608
608
below for more details and possible values. By default, no pre-trained weights are used.
609
609
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
610
610
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
611
611
base class. Please refer to the `source code
612
612
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
613
613
for more details about this class.
614
614
615
- .. autoclass:: torchvision.models.vision_transformer.ViT_B_16_Weights
615
+ .. autoclass:: torchvision.models.ViT_B_16_Weights
616
616
:members:
617
617
"""
618
618
weights = ViT_B_16_Weights.verify(weights)
@@ -636,16 +636,16 @@ def vit_b_32(*, weights: Optional[ViT_B_32_Weights] = None, progress: bool = Tru
636
636
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
637
637
638
638
Args:
639
- weights (:class:`~torchvision.models.vision_transformer.ViT_B_32_Weights`, optional): The pretrained
640
- weights to use. See :class:`~torchvision.models.vision_transformer.ViT_B_32_Weights`
639
+ weights (:class:`~torchvision.models.ViT_B_32_Weights`, optional): The pretrained
640
+ weights to use. See :class:`~torchvision.models.ViT_B_32_Weights`
641
641
below for more details and possible values. By default, no pre-trained weights are used.
642
642
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
643
643
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
644
644
base class. Please refer to the `source code
645
645
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
646
646
for more details about this class.
647
647
648
- .. autoclass:: torchvision.models.vision_transformer.ViT_B_32_Weights
648
+ .. autoclass:: torchvision.models.ViT_B_32_Weights
649
649
:members:
650
650
"""
651
651
weights = ViT_B_32_Weights.verify(weights)
@@ -669,16 +669,16 @@ def vit_l_16(*, weights: Optional[ViT_L_16_Weights] = None, progress: bool = Tru
669
669
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
670
670
671
671
Args:
672
- weights (:class:`~torchvision.models.vision_transformer.ViT_L_16_Weights`, optional): The pretrained
673
- weights to use. See :class:`~torchvision.models.vision_transformer.ViT_L_16_Weights`
672
+ weights (:class:`~torchvision.models.ViT_L_16_Weights`, optional): The pretrained
673
+ weights to use. See :class:`~torchvision.models.ViT_L_16_Weights`
674
674
below for more details and possible values. By default, no pre-trained weights are used.
675
675
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
676
676
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
677
677
base class. Please refer to the `source code
678
678
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
679
679
for more details about this class.
680
680
681
- .. autoclass:: torchvision.models.vision_transformer.ViT_L_16_Weights
681
+ .. autoclass:: torchvision.models.ViT_L_16_Weights
682
682
:members:
683
683
"""
684
684
weights = ViT_L_16_Weights.verify(weights)
@@ -702,16 +702,16 @@ def vit_l_32(*, weights: Optional[ViT_L_32_Weights] = None, progress: bool = Tru
702
702
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
703
703
704
704
Args:
705
- weights (:class:`~torchvision.models.vision_transformer.ViT_L_32_Weights`, optional): The pretrained
706
- weights to use. See :class:`~torchvision.models.vision_transformer.ViT_L_32_Weights`
705
+ weights (:class:`~torchvision.models.ViT_L_32_Weights`, optional): The pretrained
706
+ weights to use. See :class:`~torchvision.models.ViT_L_32_Weights`
707
707
below for more details and possible values. By default, no pre-trained weights are used.
708
708
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
709
709
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
710
710
base class. Please refer to the `source code
711
711
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
712
712
for more details about this class.
713
713
714
- .. autoclass:: torchvision.models.vision_transformer.ViT_L_32_Weights
714
+ .. autoclass:: torchvision.models.ViT_L_32_Weights
715
715
:members:
716
716
"""
717
717
weights = ViT_L_32_Weights.verify(weights)
@@ -734,16 +734,16 @@ def vit_h_14(*, weights: Optional[ViT_H_14_Weights] = None, progress: bool = Tru
734
734
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.
735
735
736
736
Args:
737
- weights (:class:`~torchvision.models.vision_transformer.ViT_H_14_Weights`, optional): The pretrained
738
- weights to use. See :class:`~torchvision.models.vision_transformer.ViT_H_14_Weights`
737
+ weights (:class:`~torchvision.models.ViT_H_14_Weights`, optional): The pretrained
738
+ weights to use. See :class:`~torchvision.models.ViT_H_14_Weights`
739
739
below for more details and possible values. By default, no pre-trained weights are used.
740
740
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
741
741
**kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
742
742
base class. Please refer to the `source code
743
743
<https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
744
744
for more details about this class.
745
745
746
- .. autoclass:: torchvision.models.vision_transformer.ViT_H_14_Weights
746
+ .. autoclass:: torchvision.models.ViT_H_14_Weights
747
747
:members:
748
748
"""
749
749
weights = ViT_H_14_Weights.verify(weights)
0 commit comments