From 925ec6267e97a3b61d636abf9fa6ced676d4bee0 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Fri, 24 Feb 2023 17:03:29 +0100 Subject: [PATCH 1/2] add gallery for transforms v2 --- gallery/plot_transforms_v2.py | 107 ++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 gallery/plot_transforms_v2.py diff --git a/gallery/plot_transforms_v2.py b/gallery/plot_transforms_v2.py new file mode 100644 index 00000000000..eade460d8f8 --- /dev/null +++ b/gallery/plot_transforms_v2.py @@ -0,0 +1,107 @@ +""" +============= +Transforms v2 +============= + +Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports +images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This +example showcases the core functionality of the new ``torchvision.transforms.v2`` v2 API. +""" + +import pathlib + +import torch +import torchvision + + +def load_data(): + from torchvision.io import read_image + from torchvision import datapoints + from torchvision.ops import masks_to_boxes + + assets_directory = pathlib.Path("assets") + + path = assets_directory / "FudanPed00054.png" + image = datapoints.Image(read_image(str(path))) + merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png")) + + labels = torch.unique(merged_masks)[1:] + + masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1)) + + bounding_boxes = datapoints.BoundingBox( + masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, spatial_size=image.shape[-2:] + ) + + return path, image, bounding_boxes, masks, labels + + +######################################################################################################################## +# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation +# masks. 
Thus, it offers native support for many Computer Vision tasks, like image and video classification, object
# detection or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.

# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
import torchvision.transforms.v2 as transforms

transform = transforms.Compose(
    [
        transforms.ColorJitter(contrast=0.5),
        transforms.RandomRotation(30),
        transforms.CenterCrop(480),
    ]
)

########################################################################################################################
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.

path, image, bounding_boxes, masks, labels = load_data()

torch.manual_seed(0)
transform(image)  # Image Classification
transform(image, bounding_boxes, labels)  # Object Detection
transform(image, bounding_boxes, masks, labels)  # Instance Segmentation
transform((image, {"boxes": bounding_boxes, "labels": labels}))  # Arbitrary Structure

########################################################################################################################
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_plot_datapoints.py`. Note however, that as
# a regular user, you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`. 
+
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing you to store extra
# information directly with the sample:

sample = {"path": path, "image": image}
transformed_sample = transform(sample)

assert transformed_sample["path"] is sample["path"]

########################################################################################################################
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as image or video if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through. 
+ +plain_tensor_image = torch.rand(image.shape) + +print(image.shape, plain_tensor_image.shape) + +# passing a plain tensor together with an explicit image, will not transform the former +plain_tensor_image, image = transform(plain_tensor_image, image) + +print(image.shape, plain_tensor_image.shape) + +# passing a plain tensor without an explicit image, will transform the former +plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes) + +print(image.shape, plain_tensor_image.shape) From 51fd98f93ddc277b8cf553e0f1a51560babc0e22 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Fri, 24 Feb 2023 17:28:03 +0100 Subject: [PATCH 2/2] address comments --- gallery/plot_transforms_v2.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/gallery/plot_transforms_v2.py b/gallery/plot_transforms_v2.py index eade460d8f8..d1096bec1e7 100644 --- a/gallery/plot_transforms_v2.py +++ b/gallery/plot_transforms_v2.py @@ -1,11 +1,11 @@ """ -============= -Transforms v2 -============= +================================== +Getting started with transforms v2 +================================== Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This -example showcases the core functionality of the new ``torchvision.transforms.v2`` v2 API. +example showcases the core functionality of the new ``torchvision.transforms.v2`` API. 
""" import pathlib @@ -63,10 +63,12 @@ def load_data(): path, image, bounding_boxes, masks, labels = load_data() torch.manual_seed(0) -transform(image) # Image Classification -transform(image, bounding_boxes, labels) # Object Detection -transform(image, bounding_boxes, masks, labels) # Instance Segmentation -transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure +new_image = transform(image) # Image Classification +new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection +new_image, new_bounding_boxes, new_masks, new_labels = transform( + image, bounding_boxes, masks, labels +) # Instance Segmentation +new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure ######################################################################################################################## # Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the @@ -78,9 +80,9 @@ def load_data(): # information directly with the sample: sample = {"path": path, "image": image} -transformed_sample = transform(sample) +new_sample = transform(sample) -assert transformed_sample["path"] is sample["path"] +assert new_sample["path"] is sample["path"] ######################################################################################################################## # As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus