1 parent f9bee39 commit 09c5ddd
torchvision/prototype/models/vision_transformer.py
@@ -11,7 +11,7 @@
 from ...models.vision_transformer import VisionTransformer, interpolate_embeddings  # noqa: F401
 from ._api import WeightsEnum, Weights
 from ._meta import _IMAGENET_CATEGORIES
-from ._utils import handle_legacy_interface
+from ._utils import handle_legacy_interface, _ovewrite_named_param

 __all__ = [
     "VisionTransformer",
@@ -111,6 +111,9 @@ def _vision_transformer(
 ) -> VisionTransformer:
     image_size = kwargs.pop("image_size", 224)

+    if weights is not None:
+        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
+
     model = VisionTransformer(
         image_size=image_size,
         patch_size=patch_size,
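Note: the added guard keeps the builder's `num_classes` consistent with the checkpoint. When pretrained weights are requested, the classification head must have exactly as many outputs as the categories the weights were trained on, so the helper pins `kwargs["num_classes"]` to `len(weights.meta["categories"])`. Below is a minimal sketch of what a helper like `_ovewrite_named_param` plausibly does, inferred from this call site; the actual implementation lives in the prototype `_utils` module and may differ:

    from typing import Any, Dict

    def _ovewrite_named_param(kwargs: Dict[str, Any], param: str, new_value: Any) -> None:
        # Sketch only, inferred from the call site above: if the caller already
        # passed `param`, it must agree with the value implied by the weights;
        # otherwise the value is filled in for them.
        if param in kwargs:
            if kwargs[param] != new_value:
                raise ValueError(
                    f"The parameter '{param}' expected value {new_value} "
                    f"but got {kwargs[param]} instead."
                )
        else:
            kwargs[param] = new_value

Under this reading, calling `_vision_transformer(..., weights=some_weights, num_classes=10)` with weights trained on 1000 categories would raise instead of silently building a head that cannot load the checkpoint.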
0 commit comments