[NOMRG] WIP Use tensor transforms within PIL transforms #3502

Closed
wants to merge 4 commits into from
test/test_functional_tensor.py (2 changes: 1 addition & 1 deletion)
@@ -418,7 +418,7 @@ def test_resize(self):
 
             self.assertEqual(
                 resized_tensor.size()[1:], resized_pil_img.size[::-1],
-                msg="{}, {}".format(size, interpolation)
+                msg="{}, {}, {}".format(size, interpolation, max_size)
             )
 
             if interpolation not in [NEAREST, ]:
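For context, this assertion is part of a parity check between the tensor and PIL resize paths; adding max_size to the failure message identifies which parameter combination broke. A standalone sketch of the check itself, with hypothetical values and assuming the torchvision API at the time of this PR:

import torch
import torchvision.transforms.functional as F
from torchvision.transforms import InterpolationMode
from torchvision.transforms.functional import to_pil_image

# A tensor image is (C, H, W) while PIL reports size as (W, H), hence size[::-1].
tensor_img = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
pil_img = to_pil_image(tensor_img)
resized_tensor = F.resize(tensor_img, 224, InterpolationMode.BILINEAR, max_size=256)
resized_pil_img = F.resize(pil_img, 224, InterpolationMode.BILINEAR, max_size=256)
assert resized_tensor.size()[1:] == resized_pil_img.size[::-1]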
torchvision/transforms/functional.py (5 changes: 3 additions & 2 deletions)
@@ -380,8 +380,9 @@ def resize(img: Tensor, size: List[int], interpolation: InterpolationMode = Inte
         raise TypeError("Argument interpolation should be a InterpolationMode")
 
     if not isinstance(img, torch.Tensor):
-        pil_interpolation = pil_modes_mapping[interpolation]
-        return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size)
+        # pil_interpolation = pil_modes_mapping[interpolation]
+        # return F_pil.resize(img, size=size, interpolation=pil_interpolation)
+        return F_pil.resize(img, size=size, interpolation=interpolation, max_size=max_size)
 
     return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size)
 
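After this change, torchvision.transforms.functional.resize forwards the InterpolationMode enum to the PIL backend unchanged instead of translating it through pil_modes_mapping first. A minimal usage sketch of the dispatch, assuming this patch is applied (the image and sizes are hypothetical):

from PIL import Image
import torchvision.transforms.functional as F
from torchvision.transforms import InterpolationMode

img = Image.new("RGB", (640, 480))
# The PIL path now receives the InterpolationMode enum directly. With size=224
# the longer edge would become int(224 * 640 / 480) == 298; max_size=256 caps
# it at 256 and rescales the shorter edge to int(256 * 224 / 298) == 192.
out = F.resize(img, size=224, interpolation=InterpolationMode.BILINEAR, max_size=256)
print(out.size)  # (256, 192)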
torchvision/transforms/functional_pil.py (40 changes: 6 additions & 34 deletions)
@@ -204,41 +204,13 @@ def crop(img: Image.Image, top: int, left: int, height: int, width: int) -> Imag
 
 
 @torch.jit.unused
-def resize(img, size, interpolation=Image.BILINEAR, max_size=None):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-    if not (isinstance(size, int) or (isinstance(size, Sequence) and len(size) in (1, 2))):
-        raise TypeError('Got inappropriate size arg: {}'.format(size))
-
-    if isinstance(size, Sequence) and len(size) == 1:
-        size = size[0]
-    if isinstance(size, int):
-        w, h = img.size
-
-        short, long = (w, h) if w <= h else (h, w)
-        if short == size:
-            return img
-
-        new_short, new_long = size, int(size * long / short)
+def resize(img, size, interpolation=None, max_size=None):
+    from .functional_tensor import resize as ft_resize
+    from .functional import pil_to_tensor, to_pil_image
 
-        if max_size is not None:
-            if max_size <= size:
-                raise ValueError(
-                    f"max_size = {max_size} must be strictly greater than the requested "
-                    f"size for the smaller edge size = {size}"
-                )
-            if new_long > max_size:
-                new_short, new_long = int(max_size * new_short / new_long), max_size
-
-        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
-        return img.resize((new_w, new_h), interpolation)
-    else:
-        if max_size is not None:
-            raise ValueError(
-                "max_size should only be passed if size specifies the length of the smaller edge, "
-                "i.e. size should be an int or a sequence of length 1 in torchscript mode."
-            )
-        return img.resize(size[::-1], interpolation)
+    t = pil_to_tensor(img)
+    resized_t = ft_resize(t, size, interpolation.value, max_size=max_size)
+    return to_pil_image(resized_t)
 
 
 @torch.jit.unused
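The rewritten F_pil.resize thus delegates all of the size arithmetic to the tensor implementation via a pil_to_tensor / to_pil_image round trip. Note that its new default interpolation=None is dereferenced as interpolation.value, so callers must now pass an InterpolationMode rather than a PIL constant such as Image.BILINEAR. A minimal standalone sketch of the round trip, using the names from the diff and assuming the torchvision module layout at the time of this PR:

from PIL import Image
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.transforms.functional_tensor import resize as ft_resize

img = Image.new("RGB", (640, 480))
t = pil_to_tensor(img)  # uint8 (C, H, W) tensor; pixel values are not rescaled
# InterpolationMode.BILINEAR.value == "bilinear", the string the tensor op expects
resized_t = ft_resize(t, 224, "bilinear", max_size=256)
out = to_pil_image(resized_t)  # back to a PIL image, here 256x192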