Skip to content

make type alias private #7266

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Feb 16, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions torchvision/datapoints/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT
from ._image import Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
from ._mask import Mask
from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
Comment on lines +2 to +5
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the change with the smallest diff. We could also remove them from this namespace completely and just import from the respective module directly where we need them. If we do that, should we still prefix them with an underscore? Just removing them here would also mark them private, since the modules they are defined in are private as well.


from ._dataset_wrapper import wrap_dataset_for_transforms_v2 # type: ignore[attr-defined] # usort: skip
10 changes: 5 additions & 5 deletions torchvision/datapoints/_bounding_box.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from torchvision._utils import StrEnum
from torchvision.transforms import InterpolationMode # TODO: this needs to be moved out of transforms

from ._datapoint import Datapoint, FillTypeJIT
from ._datapoint import _FillTypeJIT, Datapoint


class BoundingBoxFormat(StrEnum):
Expand Down Expand Up @@ -136,7 +136,7 @@ def rotate(
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> BoundingBox:
output, spatial_size = self._F.rotate_bounding_box(
self.as_subclass(torch.Tensor),
Expand All @@ -155,7 +155,7 @@ def affine(
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> BoundingBox:
output = self._F.affine_bounding_box(
Expand All @@ -175,7 +175,7 @@ def perspective(
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> BoundingBox:
output = self._F.perspective_bounding_box(
Expand All @@ -192,7 +192,7 @@ def elastic(
self,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> BoundingBox:
output = self._F.elastic_bounding_box(
self.as_subclass(torch.Tensor), self.format, self.spatial_size, displacement=displacement
Expand Down
16 changes: 8 additions & 8 deletions torchvision/datapoints/_datapoint.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@


D = TypeVar("D", bound="Datapoint")
FillType = Union[int, float, Sequence[int], Sequence[float], None]
FillTypeJIT = Optional[List[float]]
_FillType = Union[int, float, Sequence[int], Sequence[float], None]
_FillTypeJIT = Optional[List[float]]


class Datapoint(torch.Tensor):
Expand Down Expand Up @@ -181,7 +181,7 @@ def rotate(
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> Datapoint:
return self

Expand All @@ -192,7 +192,7 @@ def affine(
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Datapoint:
return self
Expand All @@ -202,7 +202,7 @@ def perspective(
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Datapoint:
return self
Expand All @@ -211,7 +211,7 @@ def elastic(
self,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> Datapoint:
return self

Expand Down Expand Up @@ -255,5 +255,5 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = N
return self


InputType = Union[torch.Tensor, PIL.Image.Image, Datapoint]
InputTypeJIT = torch.Tensor
_InputType = Union[torch.Tensor, PIL.Image.Image, Datapoint]
_InputTypeJIT = torch.Tensor
18 changes: 9 additions & 9 deletions torchvision/datapoints/_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import torch
from torchvision.transforms.functional import InterpolationMode

from ._datapoint import Datapoint, FillTypeJIT
from ._datapoint import _FillTypeJIT, Datapoint


class Image(Datapoint):
Expand Down Expand Up @@ -116,7 +116,7 @@ def rotate(
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> Image:
output = self._F.rotate_image_tensor(
self.as_subclass(torch.Tensor), angle, interpolation=interpolation, expand=expand, fill=fill, center=center
Expand All @@ -130,7 +130,7 @@ def affine(
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Image:
output = self._F.affine_image_tensor(
Expand All @@ -150,7 +150,7 @@ def perspective(
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Image:
output = self._F.perspective_image_tensor(
Expand All @@ -167,7 +167,7 @@ def elastic(
self,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> Image:
output = self._F.elastic_image_tensor(
self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill
Expand Down Expand Up @@ -241,7 +241,7 @@ def normalize(self, mean: List[float], std: List[float], inplace: bool = False)
return Image.wrap_like(self, output)


ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
ImageTypeJIT = torch.Tensor
TensorImageType = Union[torch.Tensor, Image]
TensorImageTypeJIT = torch.Tensor
_ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
_ImageTypeJIT = torch.Tensor
_TensorImageType = Union[torch.Tensor, Image]
_TensorImageTypeJIT = torch.Tensor
10 changes: 5 additions & 5 deletions torchvision/datapoints/_mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import torch
from torchvision.transforms import InterpolationMode

from ._datapoint import Datapoint, FillTypeJIT
from ._datapoint import _FillTypeJIT, Datapoint


class Mask(Datapoint):
Expand Down Expand Up @@ -96,7 +96,7 @@ def rotate(
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
Expand All @@ -108,7 +108,7 @@ def affine(
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
Expand All @@ -127,7 +127,7 @@ def perspective(
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
Expand All @@ -139,7 +139,7 @@ def elastic(
self,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
18 changes: 9 additions & 9 deletions torchvision/datapoints/_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import torch
from torchvision.transforms.functional import InterpolationMode

from ._datapoint import Datapoint, FillTypeJIT
from ._datapoint import _FillTypeJIT, Datapoint


class Video(Datapoint):
Expand Down Expand Up @@ -115,7 +115,7 @@ def rotate(
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> Video:
output = self._F.rotate_video(
self.as_subclass(torch.Tensor), angle, interpolation=interpolation, expand=expand, fill=fill, center=center
Expand All @@ -129,7 +129,7 @@ def affine(
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Video:
output = self._F.affine_video(
Expand All @@ -149,7 +149,7 @@ def perspective(
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Video:
output = self._F.perspective_video(
Expand All @@ -166,7 +166,7 @@ def elastic(
self,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: FillTypeJIT = None,
fill: _FillTypeJIT = None,
) -> Video:
output = self._F.elastic_video(
self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill
Expand Down Expand Up @@ -232,7 +232,7 @@ def normalize(self, mean: List[float], std: List[float], inplace: bool = False)
return Video.wrap_like(self, output)


VideoType = Union[torch.Tensor, Video]
VideoTypeJIT = torch.Tensor
TensorVideoType = Union[torch.Tensor, Video]
TensorVideoTypeJIT = torch.Tensor
_VideoType = Union[torch.Tensor, Video]
_VideoTypeJIT = torch.Tensor
_TensorVideoType = Union[torch.Tensor, Video]
_TensorVideoTypeJIT = torch.Tensor
10 changes: 5 additions & 5 deletions torchvision/prototype/transforms/_augment.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,15 +119,15 @@ def __init__(

def _copy_paste(
self,
image: datapoints.TensorImageType,
image: datapoints._TensorImageType,
target: Dict[str, Any],
paste_image: datapoints.TensorImageType,
paste_image: datapoints._TensorImageType,
paste_target: Dict[str, Any],
random_selection: torch.Tensor,
blending: bool,
resize_interpolation: F.InterpolationMode,
antialias: Optional[bool],
) -> Tuple[datapoints.TensorImageType, Dict[str, Any]]:
) -> Tuple[datapoints._TensorImageType, Dict[str, Any]]:

paste_masks = paste_target["masks"].wrap_like(paste_target["masks"], paste_target["masks"][random_selection])
paste_boxes = paste_target["boxes"].wrap_like(paste_target["boxes"], paste_target["boxes"][random_selection])
Expand Down Expand Up @@ -199,7 +199,7 @@ def _copy_paste(

def _extract_image_targets(
self, flat_sample: List[Any]
) -> Tuple[List[datapoints.TensorImageType], List[Dict[str, Any]]]:
) -> Tuple[List[datapoints._TensorImageType], List[Dict[str, Any]]]:
# fetch all images, bboxes, masks and labels from unstructured input
# with List[image], List[BoundingBox], List[Mask], List[Label]
images, bboxes, masks, labels = [], [], [], []
Expand Down Expand Up @@ -230,7 +230,7 @@ def _extract_image_targets(
def _insert_outputs(
self,
flat_sample: List[Any],
output_images: List[datapoints.TensorImageType],
output_images: List[datapoints._TensorImageType],
output_targets: List[Dict[str, Any]],
) -> None:
c0, c1, c2, c3 = 0, 0, 0, 0
Expand Down
2 changes: 1 addition & 1 deletion torchvision/prototype/transforms/_geometry.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints.FillType, Dict[Type, datapoints.FillType]] = 0,
fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
Expand Down
4 changes: 2 additions & 2 deletions torchvision/prototype/transforms/_misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]
self.dims = dims

def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
Expand All @@ -50,7 +50,7 @@ def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, i
self.dims = dims

def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
Expand Down
4 changes: 2 additions & 2 deletions torchvision/transforms/v2/_augment.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,8 +97,8 @@ def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
return dict(i=i, j=j, h=h, w=w, v=v)

def _transform(
self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
) -> Union[datapoints.ImageType, datapoints.VideoType]:
self, inpt: Union[datapoints._ImageType, datapoints._VideoType], params: Dict[str, Any]
) -> Union[datapoints._ImageType, datapoints._VideoType]:
if params["v"] is not None:
inpt = F.erase(inpt, **params, inplace=self.inplace)

Expand Down
Loading