
Commit d4d20f0

make type alias private (#7266)
1 parent e405f3c commit d4d20f0

24 files changed, +151 -151 lines changed
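
This commit is a mechanical rename: every type alias exported from torchvision.datapoints (FillType, FillTypeJIT, InputType, InputTypeJIT, ImageType, VideoType, and their Tensor*/JIT variants) gains a leading underscore, marking it as internal rather than public API. A minimal sketch of what the rename means for code that referenced the old names; the import lines and the stand-in aliases below are illustrative, not part of this commit:

# Before this commit the aliases were importable under public names:
#     from torchvision.datapoints import FillType, ImageType
# Afterwards only the underscored, private spellings exist:
#     from torchvision.datapoints import _FillType, _ImageType  # discouraged: internal names
# Code outside torchvision can simply spell the unions out instead:
from typing import Sequence, Union

import PIL.Image
import torch

FillLike = Union[int, float, Sequence[int], Sequence[float], None]  # mirrors _FillType
ImageLike = Union[torch.Tensor, PIL.Image.Image]  # roughly _ImageType, minus the Image datapoint class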

torchvision/datapoints/__init__.py

Lines changed: 3 additions & 3 deletions
@@ -1,7 +1,7 @@
 from ._bounding_box import BoundingBox, BoundingBoxFormat
-from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT
-from ._image import Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
+from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT
+from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
 from ._mask import Mask
-from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
+from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video

 from ._dataset_wrapper import wrap_dataset_for_transforms_v2  # type: ignore[attr-defined]  # usort: skip

torchvision/datapoints/_bounding_box.py

Lines changed: 5 additions & 5 deletions
@@ -6,7 +6,7 @@
 from torchvision._utils import StrEnum
 from torchvision.transforms import InterpolationMode  # TODO: this needs to be moved out of transforms

-from ._datapoint import Datapoint, FillTypeJIT
+from ._datapoint import _FillTypeJIT, Datapoint


 class BoundingBoxFormat(StrEnum):
@@ -136,7 +136,7 @@ def rotate(
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
         expand: bool = False,
         center: Optional[List[float]] = None,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> BoundingBox:
         output, spatial_size = self._F.rotate_bounding_box(
             self.as_subclass(torch.Tensor),
@@ -155,7 +155,7 @@ def affine(
         scale: float,
         shear: List[float],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         center: Optional[List[float]] = None,
     ) -> BoundingBox:
         output = self._F.affine_bounding_box(
@@ -175,7 +175,7 @@ def perspective(
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         coefficients: Optional[List[float]] = None,
     ) -> BoundingBox:
         output = self._F.perspective_bounding_box(
@@ -192,7 +192,7 @@ def elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> BoundingBox:
         output = self._F.elastic_bounding_box(
             self.as_subclass(torch.Tensor), self.format, self.spatial_size, displacement=displacement

torchvision/datapoints/_datapoint.py

Lines changed: 8 additions & 8 deletions
@@ -11,8 +11,8 @@


 D = TypeVar("D", bound="Datapoint")
-FillType = Union[int, float, Sequence[int], Sequence[float], None]
-FillTypeJIT = Optional[List[float]]
+_FillType = Union[int, float, Sequence[int], Sequence[float], None]
+_FillTypeJIT = Optional[List[float]]


 class Datapoint(torch.Tensor):
@@ -181,7 +181,7 @@ def rotate(
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
         expand: bool = False,
         center: Optional[List[float]] = None,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> Datapoint:
         return self

@@ -192,7 +192,7 @@ def affine(
         scale: float,
         shear: List[float],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         center: Optional[List[float]] = None,
     ) -> Datapoint:
         return self
@@ -202,7 +202,7 @@ def perspective(
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         coefficients: Optional[List[float]] = None,
     ) -> Datapoint:
         return self
@@ -211,7 +211,7 @@ def elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> Datapoint:
         return self

@@ -255,5 +255,5 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = N
         return self


-InputType = Union[torch.Tensor, PIL.Image.Image, Datapoint]
-InputTypeJIT = torch.Tensor
+_InputType = Union[torch.Tensor, PIL.Image.Image, Datapoint]
+_InputTypeJIT = torch.Tensor
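
The aliases themselves are plain typing unions, as the first hunk above shows. A self-contained sketch that re-declares them and feeds the JIT-friendly variant to a hypothetical helper (the helper is made up for illustration, it is not torchvision code):

from typing import List, Optional, Sequence, Union

# Equivalent re-declarations of the aliases from _datapoint.py:
_FillType = Union[int, float, Sequence[int], Sequence[float], None]
_FillTypeJIT = Optional[List[float]]  # the TorchScript-compatible narrowing


def normalize_fill(fill: _FillTypeJIT) -> List[float]:
    # Hypothetical helper: turn a JIT-style fill into a concrete list of floats.
    return [0.0] if fill is None else list(fill)


print(normalize_fill(None))        # [0.0]
print(normalize_fill([1.0, 2.0]))  # [1.0, 2.0]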

torchvision/datapoints/_image.py

Lines changed: 9 additions & 9 deletions
@@ -6,7 +6,7 @@
 import torch
 from torchvision.transforms.functional import InterpolationMode

-from ._datapoint import Datapoint, FillTypeJIT
+from ._datapoint import _FillTypeJIT, Datapoint


 class Image(Datapoint):
@@ -116,7 +116,7 @@ def rotate(
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
         expand: bool = False,
         center: Optional[List[float]] = None,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> Image:
         output = self._F.rotate_image_tensor(
             self.as_subclass(torch.Tensor), angle, interpolation=interpolation, expand=expand, fill=fill, center=center
@@ -130,7 +130,7 @@ def affine(
         scale: float,
         shear: List[float],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         center: Optional[List[float]] = None,
     ) -> Image:
         output = self._F.affine_image_tensor(
@@ -150,7 +150,7 @@ def perspective(
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         coefficients: Optional[List[float]] = None,
     ) -> Image:
         output = self._F.perspective_image_tensor(
@@ -167,7 +167,7 @@ def elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> Image:
         output = self._F.elastic_image_tensor(
             self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill
@@ -241,7 +241,7 @@ def normalize(self, mean: List[float], std: List[float], inplace: bool = False)
         return Image.wrap_like(self, output)


-ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
-ImageTypeJIT = torch.Tensor
-TensorImageType = Union[torch.Tensor, Image]
-TensorImageTypeJIT = torch.Tensor
+_ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
+_ImageTypeJIT = torch.Tensor
+_TensorImageType = Union[torch.Tensor, Image]
+_TensorImageTypeJIT = torch.Tensor
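
For reference, the image aliases are unions over tensor-like inputs, and Image itself subclasses torch.Tensor, so a runtime check against torch.Tensor and PIL.Image.Image covers everything _ImageType names. A short sketch of that reasoning; the check function is illustrative only:

import PIL.Image
import torch

# _ImageType       = Union[torch.Tensor, PIL.Image.Image, Image]
# _TensorImageType = Union[torch.Tensor, Image]


def looks_like_image_input(obj: object) -> bool:
    # Illustrative check; Image instances pass because Image subclasses torch.Tensor.
    return isinstance(obj, (torch.Tensor, PIL.Image.Image))


print(looks_like_image_input(torch.rand(3, 4, 4)))           # True
print(looks_like_image_input(PIL.Image.new("RGB", (4, 4))))  # True
print(looks_like_image_input("not an image"))                # False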

torchvision/datapoints/_mask.py

Lines changed: 5 additions & 5 deletions
@@ -6,7 +6,7 @@
 import torch
 from torchvision.transforms import InterpolationMode

-from ._datapoint import Datapoint, FillTypeJIT
+from ._datapoint import _FillTypeJIT, Datapoint


 class Mask(Datapoint):
@@ -96,7 +96,7 @@ def rotate(
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
         expand: bool = False,
         center: Optional[List[float]] = None,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> Mask:
         output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
         return Mask.wrap_like(self, output)
@@ -108,7 +108,7 @@ def affine(
         scale: float,
         shear: List[float],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         center: Optional[List[float]] = None,
     ) -> Mask:
         output = self._F.affine_mask(
@@ -127,7 +127,7 @@ def perspective(
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         coefficients: Optional[List[float]] = None,
     ) -> Mask:
         output = self._F.perspective_mask(
@@ -139,7 +139,7 @@ def elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> Mask:
         output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
         return Mask.wrap_like(self, output)

torchvision/datapoints/_video.py

Lines changed: 9 additions & 9 deletions
@@ -5,7 +5,7 @@
 import torch
 from torchvision.transforms.functional import InterpolationMode

-from ._datapoint import Datapoint, FillTypeJIT
+from ._datapoint import _FillTypeJIT, Datapoint


 class Video(Datapoint):
@@ -115,7 +115,7 @@ def rotate(
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
         expand: bool = False,
         center: Optional[List[float]] = None,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> Video:
         output = self._F.rotate_video(
             self.as_subclass(torch.Tensor), angle, interpolation=interpolation, expand=expand, fill=fill, center=center
@@ -129,7 +129,7 @@ def affine(
         scale: float,
         shear: List[float],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         center: Optional[List[float]] = None,
     ) -> Video:
         output = self._F.affine_video(
@@ -149,7 +149,7 @@ def perspective(
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
         coefficients: Optional[List[float]] = None,
     ) -> Video:
         output = self._F.perspective_video(
@@ -166,7 +166,7 @@ def elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
-        fill: FillTypeJIT = None,
+        fill: _FillTypeJIT = None,
     ) -> Video:
         output = self._F.elastic_video(
             self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill
@@ -232,7 +232,7 @@ def normalize(self, mean: List[float], std: List[float], inplace: bool = False)
         return Video.wrap_like(self, output)


-VideoType = Union[torch.Tensor, Video]
-VideoTypeJIT = torch.Tensor
-TensorVideoType = Union[torch.Tensor, Video]
-TensorVideoTypeJIT = torch.Tensor
+_VideoType = Union[torch.Tensor, Video]
+_VideoTypeJIT = torch.Tensor
+_TensorVideoType = Union[torch.Tensor, Video]
+_TensorVideoTypeJIT = torch.Tensor

torchvision/prototype/transforms/_augment.py

Lines changed: 5 additions & 5 deletions
@@ -119,15 +119,15 @@ def __init__(

     def _copy_paste(
         self,
-        image: datapoints.TensorImageType,
+        image: datapoints._TensorImageType,
         target: Dict[str, Any],
-        paste_image: datapoints.TensorImageType,
+        paste_image: datapoints._TensorImageType,
         paste_target: Dict[str, Any],
         random_selection: torch.Tensor,
         blending: bool,
         resize_interpolation: F.InterpolationMode,
         antialias: Optional[bool],
-    ) -> Tuple[datapoints.TensorImageType, Dict[str, Any]]:
+    ) -> Tuple[datapoints._TensorImageType, Dict[str, Any]]:

         paste_masks = paste_target["masks"].wrap_like(paste_target["masks"], paste_target["masks"][random_selection])
         paste_boxes = paste_target["boxes"].wrap_like(paste_target["boxes"], paste_target["boxes"][random_selection])
@@ -199,7 +199,7 @@ def _copy_paste(

     def _extract_image_targets(
         self, flat_sample: List[Any]
-    ) -> Tuple[List[datapoints.TensorImageType], List[Dict[str, Any]]]:
+    ) -> Tuple[List[datapoints._TensorImageType], List[Dict[str, Any]]]:
         # fetch all images, bboxes, masks and labels from unstructured input
         # with List[image], List[BoundingBox], List[Mask], List[Label]
         images, bboxes, masks, labels = [], [], [], []
@@ -230,7 +230,7 @@ def _extract_image_targets(
     def _insert_outputs(
         self,
         flat_sample: List[Any],
-        output_images: List[datapoints.TensorImageType],
+        output_images: List[datapoints._TensorImageType],
         output_targets: List[Dict[str, Any]],
     ) -> None:
         c0, c1, c2, c3 = 0, 0, 0, 0
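
Aside from the annotations, the first hunk above shows the selection step of _copy_paste: the paste target's masks and boxes are indexed with random_selection before being re-wrapped. A plain-tensor sketch of that indexing, with ordinary tensors standing in for the Mask and BoundingBox datapoints:

import torch

paste_masks = torch.rand(5, 8, 8) > 0.5   # stand-in for paste_target["masks"]
paste_boxes = torch.rand(5, 4)            # stand-in for paste_target["boxes"]
random_selection = torch.tensor([0, 3])   # indices of the paste instances to keep

print(paste_masks[random_selection].shape)  # torch.Size([2, 8, 8])
print(paste_boxes[random_selection].shape)  # torch.Size([2, 4])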

torchvision/prototype/transforms/_geometry.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ class FixedSizeCrop(Transform):
     def __init__(
         self,
         size: Union[int, Sequence[int]],
-        fill: Union[datapoints.FillType, Dict[Type, datapoints.FillType]] = 0,
+        fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
         padding_mode: str = "constant",
     ) -> None:
         super().__init__()
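
The fill annotation above accepts either a single value or a mapping from datapoint type to value. A standalone sketch of how such a union might be resolved per input type; the resolver is hypothetical, not code from this commit:

from typing import Dict, Sequence, Type, Union

_FillType = Union[int, float, Sequence[int], Sequence[float], None]


def resolve_fill(fill: Union[_FillType, Dict[Type, _FillType]], inpt_type: Type) -> _FillType:
    # Hypothetical resolver: pick the fill for the given datapoint type when a
    # mapping was passed, otherwise use the single value for every type.
    if isinstance(fill, dict):
        return fill.get(inpt_type, 0)
    return fill


print(resolve_fill(0, float))                     # 0
print(resolve_fill({int: 255, float: 0.5}, int))  # 255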

torchvision/prototype/transforms/_misc.py

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@ def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]
         self.dims = dims

     def _transform(
-        self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
+        self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
     ) -> torch.Tensor:
         dims = self.dims[type(inpt)]
         if dims is None:
@@ -50,7 +50,7 @@ def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, i
         self.dims = dims

     def _transform(
-        self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
+        self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
     ) -> torch.Tensor:
         dims = self.dims[type(inpt)]
         if dims is None:
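
Both _transform methods above look up the requested dims by the concrete input type and skip the input when the mapping holds None. A minimal sketch of that per-type lookup with a plain tensor; the example mapping and the permute call are assumptions about what the transform does with dims, not code from this commit:

from typing import Dict, Optional, Sequence, Type

import torch

dims: Dict[Type, Optional[Sequence[int]]] = {torch.Tensor: (0, 2, 1)}  # example mapping

inpt = torch.rand(2, 3, 4)
selected = dims[type(inpt)]
out = inpt if selected is None else inpt.permute(*selected)
print(out.shape)  # torch.Size([2, 4, 3])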

torchvision/transforms/v2/_augment.py

Lines changed: 2 additions & 2 deletions
@@ -97,8 +97,8 @@ def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
         return dict(i=i, j=j, h=h, w=w, v=v)

     def _transform(
-        self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
-    ) -> Union[datapoints.ImageType, datapoints.VideoType]:
+        self, inpt: Union[datapoints._ImageType, datapoints._VideoType], params: Dict[str, Any]
+    ) -> Union[datapoints._ImageType, datapoints._VideoType]:
         if params["v"] is not None:
             inpt = F.erase(inpt, **params, inplace=self.inplace)
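
The params dict produced by _get_params carries i, j, h, w, and v, which _transform forwards to F.erase. A quick sketch of the erase call using the stable torchvision.transforms.functional API, which takes the same parameters:

import torch
from torchvision.transforms import functional as F

img = torch.rand(3, 8, 8)
patch = torch.zeros(3, 4, 4)

# Erase the 4x4 window starting at row 2, column 2, filling it with zeros:
out = F.erase(img, i=2, j=2, h=4, w=4, v=patch)
print(out[:, 2:6, 2:6].abs().sum())  # tensor(0.)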
