diff --git a/torchvision/prototype/transforms/functional/_augment.py b/torchvision/prototype/transforms/functional/_augment.py index 2eafe0d3c1f..e5e93aa0b4f 100644 --- a/torchvision/prototype/transforms/functional/_augment.py +++ b/torchvision/prototype/transforms/functional/_augment.py @@ -17,7 +17,7 @@ } ) def erase(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -28,7 +28,7 @@ def erase(input: T, *args: Any, **kwargs: Any) -> T: } ) def mixup(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... diff --git a/torchvision/prototype/transforms/functional/_color.py b/torchvision/prototype/transforms/functional/_color.py index 23e128b7856..290ae7094ce 100644 --- a/torchvision/prototype/transforms/functional/_color.py +++ b/torchvision/prototype/transforms/functional/_color.py @@ -19,7 +19,7 @@ } ) def adjust_brightness(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -31,7 +31,7 @@ def adjust_brightness(input: T, *args: Any, **kwargs: Any) -> T: } ) def adjust_saturation(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -43,7 +43,7 @@ def adjust_saturation(input: T, *args: Any, **kwargs: Any) -> T: } ) def adjust_contrast(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -55,7 +55,7 @@ def adjust_contrast(input: T, *args: Any, **kwargs: Any) -> T: } ) def adjust_sharpness(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -67,7 +67,7 @@ def adjust_sharpness(input: T, *args: Any, **kwargs: Any) -> T: } ) def posterize(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -79,7 +79,7 @@ def posterize(input: T, *args: Any, **kwargs: Any) -> T: } ) def solarize(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... 
@@ -91,7 +91,7 @@ def solarize(input: T, *args: Any, **kwargs: Any) -> T: } ) def autocontrast(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -103,7 +103,7 @@ def autocontrast(input: T, *args: Any, **kwargs: Any) -> T: } ) def equalize(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -115,5 +115,29 @@ def equalize(input: T, *args: Any, **kwargs: Any) -> T: } ) def invert(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" + ... + + +@dispatch( + { + torch.Tensor: _F.adjust_hue, + PIL.Image.Image: _F.adjust_hue, + features.Image: K.adjust_hue_image, + } +) +def adjust_hue(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" + ... + + +@dispatch( + { + torch.Tensor: _F.adjust_gamma, + PIL.Image.Image: _F.adjust_gamma, + features.Image: K.adjust_gamma_image, + } +) +def adjust_gamma(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" ... 
diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index 147baa3a066..ae930bfc5f1 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -20,7 +20,7 @@ }, ) def horizontal_flip(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" if isinstance(input, features.BoundingBox): output = K.horizontal_flip_bounding_box(input, format=input.format, image_size=input.image_size) return cast(T, features.BoundingBox.new_like(input, output)) @@ -38,7 +38,7 @@ def horizontal_flip(input: T, *args: Any, **kwargs: Any) -> T: } ) def resize(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" if isinstance(input, features.BoundingBox): size = kwargs.pop("size") output = K.resize_bounding_box(input, size=size, image_size=input.image_size) @@ -55,7 +55,7 @@ def resize(input: T, *args: Any, **kwargs: Any) -> T: } ) def center_crop(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -67,7 +67,7 @@ def center_crop(input: T, *args: Any, **kwargs: Any) -> T: } ) def resized_crop(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -79,7 +79,7 @@ def resized_crop(input: T, *args: Any, **kwargs: Any) -> T: } ) def affine(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" ... @@ -91,5 +91,77 @@ def affine(input: T, *args: Any, **kwargs: Any) -> T: } ) def rotate(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" + ... + + +@dispatch( + { + torch.Tensor: _F.pad, + PIL.Image.Image: _F.pad, + features.Image: K.pad_image, + } +) +def pad(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" + ... 
+ + +@dispatch( + { + torch.Tensor: _F.crop, + PIL.Image.Image: _F.crop, + features.Image: K.crop_image, + } +) +def crop(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" + ... + + +@dispatch( + { + torch.Tensor: _F.perspective, + PIL.Image.Image: _F.perspective, + features.Image: K.perspective_image, + } +) +def perspective(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" + ... + + +@dispatch( + { + torch.Tensor: _F.vflip, + PIL.Image.Image: _F.vflip, + features.Image: K.vertical_flip_image, + } +) +def vertical_flip(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" + ... + + +@dispatch( + { + torch.Tensor: _F.five_crop, + PIL.Image.Image: _F.five_crop, + features.Image: K.five_crop_image, + } +) +def five_crop(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" + ... + + +@dispatch( + { + torch.Tensor: _F.ten_crop, + PIL.Image.Image: _F.ten_crop, + features.Image: K.ten_crop_image, + } +) +def ten_crop(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" ... diff --git a/torchvision/prototype/transforms/functional/_misc.py b/torchvision/prototype/transforms/functional/_misc.py index 7cf0765105a..45e1bdefd3d 100644 --- a/torchvision/prototype/transforms/functional/_misc.py +++ b/torchvision/prototype/transforms/functional/_misc.py @@ -1,5 +1,6 @@ from typing import TypeVar, Any +import PIL.Image import torch from torchvision.prototype import features from torchvision.prototype.transforms import kernels as K @@ -17,5 +18,17 @@ } ) def normalize(input: T, *args: Any, **kwargs: Any) -> T: - """ADDME""" + """TODO: add docstring""" + ... + + +@dispatch( + { + torch.Tensor: _F.gaussian_blur, + PIL.Image.Image: _F.gaussian_blur, + features.Image: K.gaussian_blur_image, + } +) +def gaussian_blur(input: T, *args: Any, **kwargs: Any) -> T: + """TODO: add docstring""" ...
diff --git a/torchvision/prototype/transforms/kernels/__init__.py b/torchvision/prototype/transforms/kernels/__init__.py index 6f74f6af0e9..1cac91d29c1 100644 --- a/torchvision/prototype/transforms/kernels/__init__.py +++ b/torchvision/prototype/transforms/kernels/__init__.py @@ -18,6 +18,8 @@ autocontrast_image, equalize_image, invert_image, + adjust_hue_image, + adjust_gamma_image, ) from ._geometry import ( horizontal_flip_bounding_box, @@ -29,6 +31,12 @@ resized_crop_image, affine_image, rotate_image, + pad_image, + crop_image, + perspective_image, + vertical_flip_image, + five_crop_image, + ten_crop_image, ) -from ._misc import normalize_image +from ._misc import normalize_image, gaussian_blur_image from ._type_conversion import decode_image_with_pil, decode_video_with_av, label_to_one_hot diff --git a/torchvision/prototype/transforms/kernels/_color.py b/torchvision/prototype/transforms/kernels/_color.py index 0d828e6d169..00ed5cfbfc7 100644 --- a/torchvision/prototype/transforms/kernels/_color.py +++ b/torchvision/prototype/transforms/kernels/_color.py @@ -10,3 +10,5 @@ autocontrast_image = _F.autocontrast equalize_image = _F.equalize invert_image = _F.invert +adjust_hue_image = _F.adjust_hue +adjust_gamma_image = _F.adjust_gamma diff --git a/torchvision/prototype/transforms/kernels/_geometry.py b/torchvision/prototype/transforms/kernels/_geometry.py index fb25f0fdf47..72afc2e62a3 100644 --- a/torchvision/prototype/transforms/kernels/_geometry.py +++ b/torchvision/prototype/transforms/kernels/_geometry.py @@ -68,3 +68,9 @@ def resize_bounding_box(bounding_box: torch.Tensor, *, size: List[int], image_si resized_crop_image = _F.resized_crop affine_image = _F.affine rotate_image = _F.rotate +pad_image = _F.pad +crop_image = _F.crop +perspective_image = _F.perspective +vertical_flip_image = _F.vflip +five_crop_image = _F.five_crop +ten_crop_image = _F.ten_crop diff --git a/torchvision/prototype/transforms/kernels/_misc.py 
b/torchvision/prototype/transforms/kernels/_misc.py index de148ab194a..f4e2c69c7ee 100644 --- a/torchvision/prototype/transforms/kernels/_misc.py +++ b/torchvision/prototype/transforms/kernels/_misc.py @@ -2,3 +2,4 @@ normalize_image = _F.normalize +gaussian_blur_image = _F.gaussian_blur