From cb36ff6f40c0a44a46c0acf378b374481418c36a Mon Sep 17 00:00:00 2001
From: Federico Pozzi
Date: Sat, 7 May 2022 10:35:13 +0200
Subject: [PATCH 1/5] feat: add functional center crop on mask

---
 test/test_prototype_transforms_functional.py | 26 +++++++++++++++++++
 .../transforms/functional/__init__.py        |  1 +
 .../transforms/functional/_geometry.py       |  4 +++
 3 files changed, 31 insertions(+)

diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py
index b24e9a41ff7..139f65c7686 100644
--- a/test/test_prototype_transforms_functional.py
+++ b/test/test_prototype_transforms_functional.py
@@ -1,6 +1,7 @@
 import functools
 import itertools
 import math
+from unittest.mock import patch, Mock
 
 import numpy as np
 import pytest
@@ -419,6 +420,12 @@ def center_crop_bounding_box():
         yield SampleInput(
             bounding_box, format=bounding_box.format, output_size=output_size, image_size=bounding_box.image_size
         )
+def center_crop_segmentation_mask():
+    for mask, output_size in itertools.product(
+        make_segmentation_masks(),
+        [[4, 3], [42, 70], [4]],  # crop sizes < image sizes, crop_sizes > image sizes, single crop size
+    ):
+        yield SampleInput(mask, output_size)
 
 
 @pytest.mark.parametrize(
@@ -1337,3 +1344,22 @@ def _compute_expected_bbox(bbox, output_size_):
     else:
         expected_bboxes = expected_bboxes[0]
     torch.testing.assert_close(output_boxes, expected_bboxes)
+def test_correctness_center_crop_segmentation_mask_on_fixed_input(device):
+    mask = torch.ones((1, 6, 6), dtype=torch.long, device=device)
+    mask[:, 1:5, 2:4] = 0
+
+    out_mask = F.center_crop_segmentation_mask(mask, [2])
+    expected_mask = torch.zeros((1, 4, 2), dtype=torch.long, device=device)
+    torch.testing.assert_close(out_mask, expected_mask)
+
+
+@pytest.mark.parametrize("output_size", [[4, 3], [4], [7, 7]])
+@patch("torchvision.prototype.transforms.functional._geometry.center_crop_image_tensor")
+def test_correctness_center_crop_segmentation_mask(center_crop_mock, output_size):
+    mask, expected = Mock(spec=torch.Tensor), Mock(spec=torch.Tensor)
+    center_crop_mock.return_value = expected
+
+    out_mask = F.center_crop_segmentation_mask(mask, output_size)
+
+    center_crop_mock.assert_called_once_with(img=mask, output_size=output_size)
+    assert expected is out_mask
diff --git a/torchvision/prototype/transforms/functional/__init__.py b/torchvision/prototype/transforms/functional/__init__.py
index cac2946b46e..2a6c7dce516 100644
--- a/torchvision/prototype/transforms/functional/__init__.py
+++ b/torchvision/prototype/transforms/functional/__init__.py
@@ -46,6 +46,7 @@
     resize_image_pil,
     resize_segmentation_mask,
     center_crop_bounding_box,
+    center_crop_segmentation_mask,
     center_crop_image_tensor,
     center_crop_image_pil,
     resized_crop_bounding_box,
diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py
index 7d6e26451c9..00c8a59e395 100644
--- a/torchvision/prototype/transforms/functional/_geometry.py
+++ b/torchvision/prototype/transforms/functional/_geometry.py
@@ -630,6 +630,10 @@ def center_crop_bounding_box(
     return crop_bounding_box(bounding_box, format, top=crop_top, left=crop_left)
 
 
+def center_crop_segmentation_mask(segmentation_mask: torch.Tensor, output_size: List[int]) -> torch.Tensor:
+    return center_crop_image_tensor(img=segmentation_mask, output_size=output_size)
+
+
 def resized_crop_image_tensor(
     img: torch.Tensor,
     top: int,

From 9e2739bb5229b1dcb22e7fd471ab11133cf1073d Mon Sep 17 00:00:00 2001
From: Federico Pozzi
Date: Tue, 10 May 2022 21:31:11 +0200
Subject: [PATCH 2/5] test: add correctness center crop with random segmentation mask

---
 test/test_prototype_transforms_functional.py | 24 +++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py
index 139f65c7686..c02d04af4b3 100644
--- a/test/test_prototype_transforms_functional.py
+++ b/test/test_prototype_transforms_functional.py
@@ -420,6 +420,8 @@ def center_crop_bounding_box():
         yield SampleInput(
             bounding_box, format=bounding_box.format, output_size=output_size, image_size=bounding_box.image_size
         )
+
+
 def center_crop_segmentation_mask():
     for mask, output_size in itertools.product(
         make_segmentation_masks(),
         [[4, 3], [42, 70], [4]],  # crop sizes < image sizes, crop_sizes > image sizes, single crop size
     ):
         yield SampleInput(mask, output_size)
@@ -1344,6 +1346,8 @@ def _compute_expected_bbox(bbox, output_size_):
     else:
         expected_bboxes = expected_bboxes[0]
     torch.testing.assert_close(output_boxes, expected_bboxes)
+
+
 def test_correctness_center_crop_segmentation_mask_on_fixed_input(device):
     mask = torch.ones((1, 6, 6), dtype=torch.long, device=device)
     mask[:, 1:5, 2:4] = 0
@@ -1353,9 +1357,27 @@ def test_correctness_center_crop_segmentation_mask_on_fixed_input(device):
     torch.testing.assert_close(out_mask, expected_mask)
 
 
+@pytest.mark.parametrize("output_size", [[4, 3], [4]])
+def test_correctness_center_crop_segmentation_mask(output_size):
+    def _compute_expected_segmentation_mask():
+        _output_size = output_size if isinstance(output_size, tuple) else (output_size, output_size)
+
+        _, h, w = mask.shape
+        left = w - _output_size[0]
+        top = h - _output_size[1]
+
+        return mask[:, top : _output_size[1], left : _output_size[0]]
+
+    mask = torch.randint(0, 2, shape=(1, 6, 6))
+    actual = F.center_crop_segmentation_mask(mask, output_size)
+
+    expected = _compute_expected_segmentation_mask()
+    assert expected == actual
+
+
 @pytest.mark.parametrize("output_size", [[4, 3], [4], [7, 7]])
 @patch("torchvision.prototype.transforms.functional._geometry.center_crop_image_tensor")
-def test_correctness_center_crop_segmentation_mask(center_crop_mock, output_size):
+def test_correctness_center_crop_segmentation_mask_mock(center_crop_mock, output_size):
     mask, expected = Mock(spec=torch.Tensor), Mock(spec=torch.Tensor)
     center_crop_mock.return_value = expected

From 9a9497804dff517bf4ea1f144701d66ab6da5840 Mon Sep 17 00:00:00 2001
From: Federico Pozzi
Date: Thu, 12 May 2022 21:35:30 +0200
Subject: [PATCH 3/5] test: improvements

---
 test/test_prototype_transforms_functional.py | 43 ++++----------------
 1 file changed, 8 insertions(+), 35 deletions(-)

diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py
index c02d04af4b3..627c3502e62 100644
--- a/test/test_prototype_transforms_functional.py
+++ b/test/test_prototype_transforms_functional.py
@@ -1,7 +1,6 @@
 import functools
 import itertools
 import math
-from unittest.mock import patch, Mock
 
 import numpy as np
 import pytest
@@ -1348,40 +1347,14 @@ def _compute_expected_bbox(bbox, output_size_):
     else:
         expected_bboxes = expected_bboxes[0]
     torch.testing.assert_close(output_boxes, expected_bboxes)
 
 
-def test_correctness_center_crop_segmentation_mask_on_fixed_input(device):
-    mask = torch.ones((1, 6, 6), dtype=torch.long, device=device)
-    mask[:, 1:5, 2:4] = 0
-
-    out_mask = F.center_crop_segmentation_mask(mask, [2])
-    expected_mask = torch.zeros((1, 4, 2), dtype=torch.long, device=device)
-    torch.testing.assert_close(out_mask, expected_mask)
-
-
-@pytest.mark.parametrize("output_size", [[4, 3], [4]])
-def test_correctness_center_crop_segmentation_mask(output_size):
-    def _compute_expected_segmentation_mask():
-        _output_size = output_size if isinstance(output_size, tuple) else (output_size, output_size)
-
-        _, h, w = mask.shape
-        left = w - _output_size[0]
-        top = h - _output_size[1]
-
-        return mask[:, top : _output_size[1], left : _output_size[0]]
-
-    mask = torch.randint(0, 2, shape=(1, 6, 6))
-    actual = F.center_crop_segmentation_mask(mask, output_size)
-
-    expected = _compute_expected_segmentation_mask()
-    assert expected == actual
-
-
+@pytest.mark.parametrize("device", cpu_and_gpu())
 @pytest.mark.parametrize("output_size", [[4, 3], [4], [7, 7]])
-@patch("torchvision.prototype.transforms.functional._geometry.center_crop_image_tensor")
-def test_correctness_center_crop_segmentation_mask_mock(center_crop_mock, output_size):
-    mask, expected = Mock(spec=torch.Tensor), Mock(spec=torch.Tensor)
-    center_crop_mock.return_value = expected
+def test_correctness_center_crop_segmentation_mask(device, output_size):
+    def _compute_expected_segmentation_mask(mask, output_size):
+        return F.center_crop_image_tensor(mask, output_size)
 
-    out_mask = F.center_crop_segmentation_mask(mask, output_size)
+    mask = torch.randint(0, 2, size=(1, 6, 6), dtype=torch.long, device=device)
+    actual = F.center_crop_segmentation_mask(mask, output_size)
 
-    center_crop_mock.assert_called_once_with(img=mask, output_size=output_size)
-    assert expected is out_mask
+    expected = _compute_expected_segmentation_mask(mask, output_size)
+    torch.testing.assert_close(expected, actual)

From 802b9b4e55bb0748e0127e4c979d5ef63465e72a Mon Sep 17 00:00:00 2001
From: Federico Pozzi
Date: Sat, 21 May 2022 10:01:38 +0200
Subject: [PATCH 4/5] test: improvements

---
 test/test_prototype_transforms_functional.py | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py
index 627c3502e62..64bff612ca9 100644
--- a/test/test_prototype_transforms_functional.py
+++ b/test/test_prototype_transforms_functional.py
@@ -10,11 +10,11 @@
 from torch import jit
 from torch.nn.functional import one_hot
 from torchvision.prototype import features
+from torchvision.prototype.transforms.functional._geometry import _center_crop_compute_padding
 from torchvision.prototype.transforms.functional._meta import convert_bounding_box_format
 from torchvision.transforms.functional import _get_perspective_coeffs
 from torchvision.transforms.functional_tensor import _max_value as get_max_value
 
-
 make_tensor = functools.partial(torch.testing.make_tensor, device="cpu")
@@ -423,7 +423,7 @@ def center_crop_bounding_box():
 
 def center_crop_segmentation_mask():
     for mask, output_size in itertools.product(
-        make_segmentation_masks(),
+        make_segmentation_masks(image_sizes=((16, 16), (7, 33), (31, 9)), extra_dims=((), (4,), (2, 3))),
         [[4, 3], [42, 70], [4]],  # crop sizes < image sizes, crop_sizes > image sizes, single crop size
     ):
         yield SampleInput(mask, output_size)
@@ -1348,10 +1348,20 @@ def _compute_expected_bbox(bbox, output_size_):
 
 
 @pytest.mark.parametrize("device", cpu_and_gpu())
-@pytest.mark.parametrize("output_size", [[4, 3], [4], [7, 7]])
+@pytest.mark.parametrize("output_size", [[4, 2], [4], [7, 6]])
 def test_correctness_center_crop_segmentation_mask(device, output_size):
     def _compute_expected_segmentation_mask(mask, output_size):
-        return F.center_crop_image_tensor(mask, output_size)
+        crop_height, crop_width = output_size if len(output_size) > 1 else [output_size[0], output_size[0]]
+
+        _, image_height, image_width = mask.shape
+        if crop_width > image_height or crop_height > image_width:
+            padding = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
+            mask = F.pad_image_tensor(mask, padding, fill=0)
+
+        left = round((image_width - crop_width) * 0.5)
+        top = round((image_height - crop_height) * 0.5)
+
+        return mask[:, top : top + crop_height, left : left + crop_width]
 
     mask = torch.randint(0, 2, size=(1, 6, 6), dtype=torch.long, device=device)
     actual = F.center_crop_segmentation_mask(mask, output_size)

From 97cfbf884f2a501f585351490d8e6b8a98c386d8 Mon Sep 17 00:00:00 2001
From: vfdev
Date: Mon, 23 May 2022 22:41:03 +0200
Subject: [PATCH 5/5] Apply suggestions from code review

Co-authored-by: Philip Meier
---
 test/test_prototype_transforms_functional.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py
index 64bff612ca9..e0376eeb5c6 100644
--- a/test/test_prototype_transforms_functional.py
+++ b/test/test_prototype_transforms_functional.py
@@ -423,7 +423,7 @@ def center_crop_bounding_box():
 
 def center_crop_segmentation_mask():
     for mask, output_size in itertools.product(
-        make_segmentation_masks(image_sizes=((16, 16), (7, 33), (31, 9)), extra_dims=((), (4,), (2, 3))),
+        make_segmentation_masks(image_sizes=((16, 16), (7, 33), (31, 9))),
         [[4, 3], [42, 70], [4]],  # crop sizes < image sizes, crop_sizes > image sizes, single crop size
     ):
         yield SampleInput(mask, output_size)
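
A minimal usage sketch of the kernel added above, assuming a torchvision checkout with these five patches applied (the prototype functional namespace is unreleased, so the import path may still change):

    import torch
    import torchvision.prototype.transforms.functional as F

    # Integer class-id mask shaped (num_masks, height, width), like make_segmentation_masks() produces.
    mask = torch.ones((1, 6, 6), dtype=torch.long)
    mask[:, 1:5, 2:4] = 0

    # A single-element output_size is treated as a square crop, matching the expected-value helper in patch 4.
    print(F.center_crop_segmentation_mask(mask, [2]).shape)     # torch.Size([1, 2, 2])

    # A crop larger than the mask is zero-padded before cropping (see _center_crop_compute_padding in patch 4).
    print(F.center_crop_segmentation_mask(mask, [8, 8]).shape)  # torch.Size([1, 8, 8])

Because center_crop_segmentation_mask simply forwards to center_crop_image_tensor, only slicing and constant zero padding are involved, so class ids are never interpolated.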