Commit 72d7077
Review Features prototype
1 parent 7cffef6 commit 72d7077

5 files changed: +12 -3 lines changed

torchvision/prototype/features/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -4,3 +4,5 @@
 from ._image import ColorSpace, Image
 from ._label import Label, OneHotLabel
 from ._segmentation_mask import SegmentationMask
+
+# We put a lot of effort into Video this half. We will need to figure out video tensors in this prototype as well.

torchvision/prototype/features/_bounding_box.py

Lines changed: 4 additions & 1 deletion
@@ -15,7 +15,7 @@ class BoundingBoxFormat(StrEnum):
 
 
 class BoundingBox(Feature):
-    formats = BoundingBoxFormat
+    formats = BoundingBoxFormat  # Couldn't find a use of this in the code. Is there a reason why we don't just let people access the enum directly?
     format: BoundingBoxFormat
     image_size: Tuple[int, int]
 
@@ -40,6 +40,9 @@ def __new__(
     def to_format(self, format: Union[str, BoundingBoxFormat]) -> "BoundingBox":
         # import at runtime to avoid cyclic imports
         from torchvision.prototype.transforms.functional import convert_bounding_box_format
+        # I think we can avoid this by not having a `to_format` method and instead requiring users to call the
+        # conversion function explicitly. As far as I can see, this method is used only once in the code, so it is
+        # something we could avoid altogether.
 
         if isinstance(format, str):
             format = BoundingBoxFormat[format]
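
A minimal sketch of the explicit call the comment above suggests, in place of the `to_format` method. The `old_format`/`new_format` keyword names are assumptions here and may not match the prototype's actual signature:

# Hypothetical call site: convert through the functional API instead of a method.
# The old_format/new_format keywords are assumed, not confirmed from the prototype.
from torchvision.prototype.features import BoundingBox, BoundingBoxFormat
from torchvision.prototype.transforms.functional import convert_bounding_box_format

def as_xyxy(box: BoundingBox) -> BoundingBox:
    # equivalent to box.to_format("xyxy"), with the conversion spelled out at the call site
    return convert_bounding_box_format(box, old_format=box.format, new_format=BoundingBoxFormat.XYXY)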

torchvision/prototype/features/_encoded.py

Lines changed: 1 addition & 0 deletions
@@ -41,6 +41,7 @@ def image_size(self) -> Tuple[int, int]:
     def decode(self) -> Image:
         # import at runtime to avoid cyclic imports
         from torchvision.prototype.transforms.functional import decode_image_with_pil
+        # Same comment as on BoundingBox.to_format
 
         return Image(decode_image_with_pil(self))
 

torchvision/prototype/features/_image.py

Lines changed: 3 additions & 0 deletions
@@ -13,6 +13,7 @@
 
 class ColorSpace(StrEnum):
     # this is just for test purposes
+    # How about the transparency spaces supported by ImageReadMode?
     _SENTINEL = -1
     OTHER = 0
     GRAYSCALE = 1
@@ -77,7 +78,9 @@ def guess_color_space(data: torch.Tensor) -> ColorSpace:
         return ColorSpace.OTHER
 
     def show(self) -> None:
+        # This is a nice-to-have, but not a necessary method this early in the prototype
         to_pil_image(make_grid(self.view(-1, *self.shape[-3:]))).show()
 
     def draw_bounding_box(self, bounding_box: BoundingBox, **kwargs: Any) -> "Image":
+        # Same as above, noting that this is the only method that requires to_format()
         return Image.new_like(self, draw_bounding_boxes(self, bounding_box.to_format("xyxy").view(-1, 4), **kwargs))
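
For context on the ColorSpace comment above: torchvision.io.ImageReadMode defines UNCHANGED, GRAY, GRAY_ALPHA, RGB, and RGB_ALPHA. A rough sketch of what mirroring the alpha variants could look like; the member names and values below are placeholders, not the prototype's actual choices, and the StrEnum import location is assumed:

# Hypothetical extension of ColorSpace with transparency variants mirroring ImageReadMode.
# GRAYSCALE_ALPHA and RGBA are placeholder names; values are illustrative only.
from torchvision.prototype.utils._internal import StrEnum  # assumed location

class ColorSpace(StrEnum):
    _SENTINEL = -1
    OTHER = 0
    GRAYSCALE = 1
    GRAYSCALE_ALPHA = 2  # would pair with ImageReadMode.GRAY_ALPHA
    RGB = 3
    RGBA = 4  # would pair with ImageReadMode.RGB_ALPHA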

torchvision/prototype/features/_label.py

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@ def __new__(
         *,
         dtype: Optional[torch.dtype] = None,
         device: Optional[torch.device] = None,
-        like: Optional["Label"] = None,
+        like: Optional["Label"] = None,  # Since we are on Python 3.7+, perhaps we could do `from __future__ import annotations` now.
         categories: Optional[Sequence[str]] = None,
     ):
         label = super().__new__(cls, data, dtype=dtype, device=device)
@@ -26,7 +26,7 @@ def __new__(
 
     @classmethod
     def from_category(cls, category: str, *, categories: Sequence[str]):
-        categories = list(categories)
+        categories = list(categories)  # Why shallow-copy here? If this method is called in a loop, we run the risk of creating many shallow copies.
         return cls(categories.index(category), categories=categories)
 
     def to_categories(self):
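
On the `from __future__ import annotations` suggestion: with PEP 563 (available since Python 3.7), annotations are no longer evaluated at definition time, so the forward reference to Label does not need to be a string. A minimal illustrative sketch, not the prototype's exact code (the base class here is a stand-in for Feature):

# Illustrative only: postponed evaluation of annotations removes the need to
# quote the self-referential "Label" annotation.
from __future__ import annotations

from typing import Any, Optional, Sequence

import torch

class Label(torch.Tensor):  # stand-in base; the prototype derives from Feature
    def __new__(
        cls,
        data: Any,
        *,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        like: Optional[Label] = None,  # no quotes needed with the future import
        categories: Optional[Sequence[str]] = None,
    ):
        ...

On the shallow-copy question in from_category, one hypothetical alternative would be to store tuple(categories) instead: still one allocation per call, but it makes the stored categories immutable, which documents why the copy is there in the first place.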
