
Commit 30b879f

Cleanup prototype kernel signatures (#6648)
* pass metadata directly after input in prototype kernels
* rename img to image
1 parent dc07ac2 commit 30b879f
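
In concrete terms, using the pad_bounding_box call from the test diff below: metadata describing the input (here `format`) now comes directly after the input tensor, followed by the operation's own parameters:

    # before: the operation parameter `padding` preceded the metadata
    output_boxes = F.pad_bounding_box(bboxes, padding, format=bboxes_format)

    # after: input first, then metadata, then operation parameters
    output_boxes = F.pad_bounding_box(bboxes, format=bboxes_format, padding=padding)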

5 files changed (+108, -108 lines)


test/test_prototype_transforms_functional.py

Lines changed: 2 additions & 2 deletions
@@ -632,7 +632,7 @@ def _compute_expected_bbox(bbox, padding_):
         bboxes_format = bboxes.format
         bboxes_image_size = bboxes.image_size
 
-        output_boxes = F.pad_bounding_box(bboxes, padding, format=bboxes_format)
+        output_boxes = F.pad_bounding_box(bboxes, format=bboxes_format, padding=padding)
 
         if bboxes.ndim < 2 or bboxes.shape[0] == 0:
             bboxes = [bboxes]
@@ -781,7 +781,7 @@ def _compute_expected_bbox(bbox, output_size_):
         bboxes_format = bboxes.format
         bboxes_image_size = bboxes.image_size
 
-        output_boxes = F.center_crop_bounding_box(bboxes, bboxes_format, output_size, bboxes_image_size)
+        output_boxes = F.center_crop_bounding_box(bboxes, bboxes_format, bboxes_image_size, output_size)
 
         if bboxes.ndim < 2:
             bboxes = [bboxes]
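
A practical note for callers (an observation, not part of the commit): positional calls such as the center_crop_bounding_box one above must track the new (input, metadata, parameters) order, whereas a fully keyword-based call is order-independent and survives such reshuffles:

    # keyword arguments make the call robust to signature reordering
    output_boxes = F.center_crop_bounding_box(
        bboxes, format=bboxes_format, image_size=bboxes_image_size, output_size=output_size
    )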

torchvision/prototype/features/_bounding_box.py

Lines changed: 3 additions & 3 deletions
@@ -83,7 +83,7 @@ def resize(  # type: ignore[override]
         max_size: Optional[int] = None,
         antialias: bool = False,
     ) -> BoundingBox:
-        output = self._F.resize_bounding_box(self, size, image_size=self.image_size, max_size=max_size)
+        output = self._F.resize_bounding_box(self, image_size=self.image_size, size=size, max_size=max_size)
         if isinstance(size, int):
             size = [size]
         image_size = (size[0], size[0]) if len(size) == 1 else (size[0], size[1])
@@ -95,7 +95,7 @@ def crop(self, top: int, left: int, height: int, width: int) -> BoundingBox:
 
     def center_crop(self, output_size: List[int]) -> BoundingBox:
         output = self._F.center_crop_bounding_box(
-            self, format=self.format, output_size=output_size, image_size=self.image_size
+            self, format=self.format, image_size=self.image_size, output_size=output_size
         )
         if isinstance(output_size, int):
             output_size = [output_size]
@@ -126,7 +126,7 @@ def pad(
         if not isinstance(padding, int):
             padding = list(padding)
 
-        output = self._F.pad_bounding_box(self, padding, format=self.format, padding_mode=padding_mode)
+        output = self._F.pad_bounding_box(self, format=self.format, padding=padding, padding_mode=padding_mode)
 
         # Update output image size:
         left, right, top, bottom = self._F._geometry._parse_pad_padding(padding)
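
For context, the "Update output image size" bookkeeping that follows the changed call works roughly like this (a sketch assuming _parse_pad_padding normalizes `padding` to per-side (left, right, top, bottom) amounts, as its use above suggests):

    left, right, top, bottom = self._F._geometry._parse_pad_padding(padding)
    height, width = self.image_size
    # the padded boxes now live on a correspondingly larger canvas
    image_size = (height + top + bottom, width + left + right)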

torchvision/prototype/transforms/functional/_augment.py

Lines changed: 3 additions & 3 deletions
@@ -10,11 +10,11 @@
 
 @torch.jit.unused
 def erase_image_pil(
-    img: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
+    image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
 ) -> PIL.Image.Image:
-    t_img = pil_to_tensor(img)
+    t_img = pil_to_tensor(image)
     output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
-    return to_pil_image(output, mode=img.mode)
+    return to_pil_image(output, mode=image.mode)
 
 
 def erase(
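
A minimal usage sketch of the renamed kernel (hypothetical values; relies only on the signature shown above):

    import PIL.Image
    import torch

    image = PIL.Image.new("RGB", (64, 64))         # blank RGB image
    v = torch.zeros(3, 16, 16, dtype=torch.uint8)  # patch written into the erased 16x16 region
    out = erase_image_pil(image, i=8, j=8, h=16, w=16, v=v)  # parameter is now `image`, not `img`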
