Commit d646c40

kazhang authored and facebook-github-bot committed
[fbsync] Cleanup prototype kernel dispatchers (#5417)
Summary:
* add all legacy kernels
* clarify dispatcher docstrings

Reviewed By: sallysyw
Differential Revision: D34265744
fbshipit-source-id: f1654ce8796ea0e5206448234a3707b250ae7b96
1 parent 9ed1443 commit d646c40

File tree

8 files changed (+145, -19 lines)
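All of the dispatchers touched below follow the same pattern: an @dispatch({input_type: kernel}) decorator maps the type of the first argument to the kernel that should handle it, with plain tensors and PIL images routed to the stable torchvision.transforms.functional ops and features.Image inputs routed to the prototype kernels. A minimal sketch of such a type-based dispatcher, purely illustrative and not the actual torchvision.prototype implementation:

# Illustrative sketch only -- the real torchvision.prototype dispatch helper
# may differ; this just shows the type -> kernel routing idea.
import functools
from typing import Any, Callable, Dict, Type, TypeVar

T = TypeVar("T")


def dispatch(kernels: Dict[Type, Callable]) -> Callable[[Callable], Callable]:
    """Route a call to the kernel registered for the input's type."""

    def decorator(fn: Callable) -> Callable:
        @functools.wraps(fn)
        def wrapper(input: T, *args: Any, **kwargs: Any) -> T:
            for cls, kernel in kernels.items():
                if isinstance(input, cls):
                    return kernel(input, *args, **kwargs)
            raise TypeError(f"{fn.__name__}: no kernel registered for {type(input).__name__}")

        return wrapper

    return decorator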


torchvision/prototype/transforms/functional/_augment.py

Lines changed: 2 additions & 2 deletions
@@ -17,7 +17,7 @@
     }
 )
 def erase(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -28,7 +28,7 @@ def erase(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def mixup(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 

torchvision/prototype/transforms/functional/_color.py

Lines changed: 33 additions & 9 deletions
@@ -19,7 +19,7 @@
     }
 )
 def adjust_brightness(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -31,7 +31,7 @@ def adjust_brightness(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def adjust_saturation(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -43,7 +43,7 @@ def adjust_saturation(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def adjust_contrast(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -55,7 +55,7 @@ def adjust_contrast(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def adjust_sharpness(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -67,7 +67,7 @@ def adjust_sharpness(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def posterize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -79,7 +79,7 @@ def posterize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def solarize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -91,7 +91,7 @@ def solarize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def autocontrast(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -103,7 +103,7 @@ def autocontrast(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def equalize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -115,5 +115,29 @@ def equalize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def invert(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.adjust_hue,
+        PIL.Image.Image: _F.adjust_hue,
+        features.Image: K.adjust_hue_image,
+    }
+)
+def adjust_hue(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.adjust_gamma,
+        PIL.Image.Image: _F.adjust_gamma,
+        features.Image: K.adjust_gamma_image,
+    }
+)
+def adjust_gamma(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
     ...
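Usage-wise, the new color dispatchers are meant to let the same call work across input types. A hedged sketch for adjust_hue (the hue_factor argument mirrors the stable torchvision.transforms.functional API, and constructing features.Image directly from a tensor is assumed here):

# Hedged usage sketch for the new adjust_hue dispatcher; module paths and the
# hue_factor argument follow the stable API and are assumptions here.
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F

img_tensor = torch.rand(3, 32, 32)        # routed to _F.adjust_hue
img_feature = features.Image(img_tensor)  # routed to K.adjust_hue_image

out_tensor = F.adjust_hue(img_tensor, hue_factor=0.1)
out_feature = F.adjust_hue(img_feature, hue_factor=0.1)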

torchvision/prototype/transforms/functional/_geometry.py

Lines changed: 78 additions & 6 deletions
@@ -20,7 +20,7 @@
     },
 )
 def horizontal_flip(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     if isinstance(input, features.BoundingBox):
         output = K.horizontal_flip_bounding_box(input, format=input.format, image_size=input.image_size)
         return cast(T, features.BoundingBox.new_like(input, output))
@@ -38,7 +38,7 @@ def horizontal_flip(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def resize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     if isinstance(input, features.BoundingBox):
         size = kwargs.pop("size")
         output = K.resize_bounding_box(input, size=size, image_size=input.image_size)
@@ -55,7 +55,7 @@ def resize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def center_crop(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -67,7 +67,7 @@ def center_crop(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def resized_crop(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -79,7 +79,7 @@ def resized_crop(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def affine(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -91,5 +91,77 @@ def affine(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def rotate(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.pad,
+        PIL.Image.Image: _F.pad,
+        features.Image: K.pad_image,
+    }
+)
+def pad(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.crop,
+        PIL.Image.Image: _F.crop,
+        features.Image: K.crop_image,
+    }
+)
+def crop(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.perspective,
+        PIL.Image.Image: _F.perspective,
+        features.Image: K.perspective_image,
+    }
+)
+def perspective(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.vflip,
+        PIL.Image.Image: _F.vflip,
+        features.Image: K.vertical_flip_image,
+    }
+)
+def vertical_flip(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.five_crop,
+        PIL.Image.Image: _F.five_crop,
+        features.Image: K.five_crop_image,
+    }
+)
+def five_crop(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.ten_crop,
+        PIL.Image.Image: _F.ten_crop,
+        features.Image: K.ten_crop_image,
+    }
+)
+def ten_crop(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
     ...
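The context lines of the horizontal_flip hunk above also show how non-image features are handled: the dispatcher checks for features.BoundingBox, calls the bounding-box kernel, and re-wraps the result via BoundingBox.new_like. A hedged sketch of that path (the BoundingBox constructor arguments format= and image_size=, and the BoundingBoxFormat enum, are assumptions about the prototype API):

# Hedged sketch of the BoundingBox branch shown above; constructor arguments
# are assumptions, the dispatch behavior follows the diff context.
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F

box = features.BoundingBox(
    torch.tensor([[2.0, 4.0, 10.0, 12.0]]),
    format=features.BoundingBoxFormat.XYXY,
    image_size=(32, 32),
)
flipped = F.horizontal_flip(box)  # routed through K.horizontal_flip_bounding_box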

torchvision/prototype/transforms/functional/_misc.py

Lines changed: 14 additions & 1 deletion
@@ -1,5 +1,6 @@
 from typing import TypeVar, Any
 
+import PIL.Image
 import torch
 from torchvision.prototype import features
 from torchvision.prototype.transforms import kernels as K
@@ -17,5 +18,17 @@
     }
 )
 def normalize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.gaussian_blur,
+        PIL.Image.Image: _F.gaussian_blur,
+        features.Image: K.gaussian_blur_image,
+    }
+)
+def ten_gaussian_blur(input: T, *args: Any, **kwargs: Any) -> T:
+    """TODO: add docstring"""
     ...

torchvision/prototype/transforms/kernels/__init__.py

Lines changed: 9 additions & 1 deletion
@@ -18,6 +18,8 @@
     autocontrast_image,
     equalize_image,
     invert_image,
+    adjust_hue_image,
+    adjust_gamma_image,
 )
 from ._geometry import (
     horizontal_flip_bounding_box,
@@ -29,6 +31,12 @@
     resized_crop_image,
     affine_image,
     rotate_image,
+    pad_image,
+    crop_image,
+    perspective_image,
+    vertical_flip_image,
+    five_crop_image,
+    ten_crop_image,
 )
-from ._misc import normalize_image
+from ._misc import normalize_image, gaussian_blur_image
 from ._type_conversion import decode_image_with_pil, decode_video_with_av, label_to_one_hot

torchvision/prototype/transforms/kernels/_color.py

Lines changed: 2 additions & 0 deletions
@@ -10,3 +10,5 @@
 autocontrast_image = _F.autocontrast
 equalize_image = _F.equalize
 invert_image = _F.invert
+adjust_hue_image = _F.adjust_hue
+adjust_gamma_image = _F.adjust_gamma

torchvision/prototype/transforms/kernels/_geometry.py

Lines changed: 6 additions & 0 deletions
@@ -68,3 +68,9 @@ def resize_bounding_box(bounding_box: torch.Tensor, *, size: List[int], image_si
 resized_crop_image = _F.resized_crop
 affine_image = _F.affine
 rotate_image = _F.rotate
+pad_image = _F.pad
+crop_image = _F.crop
+perspective_image = _F.perspective
+vertical_flip_image = _F.vflip
+five_crop_image = _F.five_crop
+ten_crop_image = _F.ten_crop
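The kernels added here are thin aliases: for plain image tensors they expose the stable functional ops under *_image names, which is what lets the dispatchers above reference them uniformly. A small sketch of that equivalence (the padding argument follows the stable torchvision API and is an assumption here):

# Sketch of the aliasing: K.pad_image is expected to behave like the stable
# _F.pad for image tensors; padding=2 follows the stable signature.
import torch
from torchvision.prototype.transforms import kernels as K
from torchvision.transforms import functional as _F

img = torch.rand(3, 32, 32)
assert torch.equal(K.pad_image(img, padding=2), _F.pad(img, padding=2))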

torchvision/prototype/transforms/kernels/_misc.py

Lines changed: 1 addition & 0 deletions
@@ -2,3 +2,4 @@
 
 
 normalize_image = _F.normalize
+gaussian_blur_image = _F.gaussian_blur
