Commit 07e7e25

Fix linter
1 parent 41d6fb4 commit 07e7e25

4 files changed: +15, -5 lines

torchvision/prototype/datasets/_builtin/caltech.py

Lines changed: 3 additions & 1 deletion
@@ -110,7 +110,9 @@ def _prepare_sample(
             image=image,
             ann_path=ann_path,
             bounding_box=BoundingBox(
-                ann["box_coord"].astype(np.int64).squeeze()[[2, 0, 3, 1]], format="xyxy", spatial_size=image.spatial_size
+                ann["box_coord"].astype(np.int64).squeeze()[[2, 0, 3, 1]],
+                format="xyxy",
+                spatial_size=image.spatial_size,
             ),
             contour=_Feature(ann["obj_contour"].T),
         )

torchvision/prototype/datasets/_builtin/coco.py

Lines changed: 6 additions & 2 deletions
@@ -97,7 +97,9 @@ def _resources(self) -> List[OnlineResource]:
         )
         return [images, meta]
 
-    def _segmentation_to_mask(self, segmentation: Any, *, is_crowd: bool, spatial_size: Tuple[int, int]) -> torch.Tensor:
+    def _segmentation_to_mask(
+        self, segmentation: Any, *, is_crowd: bool, spatial_size: Tuple[int, int]
+    ) -> torch.Tensor:
         from pycocotools import mask
 
         if is_crowd:
@@ -115,7 +117,9 @@ def _decode_instances_anns(self, anns: List[Dict[str, Any]], image_meta: Dict[st
             segmentations=_Feature(
                 torch.stack(
                     [
-                        self._segmentation_to_mask(ann["segmentation"], is_crowd=ann["iscrowd"], spatial_size=spatial_size)
+                        self._segmentation_to_mask(
+                            ann["segmentation"], is_crowd=ann["iscrowd"], spatial_size=spatial_size
+                        )
                         for ann in anns
                     ]
                 )

torchvision/prototype/datasets/_builtin/cub200.py

Lines changed: 3 additions & 1 deletion
@@ -149,7 +149,9 @@ def _2010_anns_key(self, data: Tuple[str, BinaryIO]) -> Tuple[str, Tuple[str, Bi
         path = pathlib.Path(data[0])
         return path.with_suffix(".jpg").name, data
 
-    def _2010_prepare_ann(self, data: Tuple[str, Tuple[str, BinaryIO]], spatial_size: Tuple[int, int]) -> Dict[str, Any]:
+    def _2010_prepare_ann(
+        self, data: Tuple[str, Tuple[str, BinaryIO]], spatial_size: Tuple[int, int]
+    ) -> Dict[str, Any]:
         _, (path, buffer) = data
         content = read_mat(buffer)
         return dict(

torchvision/prototype/features/_bounding_box.py

Lines changed: 3 additions & 1 deletion
@@ -84,7 +84,9 @@ def resize(  # type: ignore[override]
         max_size: Optional[int] = None,
         antialias: bool = False,
     ) -> BoundingBox:
-        output, spatial_size = self._F.resize_bounding_box(self, spatial_size=self.spatial_size, size=size, max_size=max_size)
+        output, spatial_size = self._F.resize_bounding_box(
+            self, spatial_size=self.spatial_size, size=size, max_size=max_size
+        )
         return BoundingBox.wrap_like(self, output, spatial_size=spatial_size)
 
     def crop(self, top: int, left: int, height: int, width: int) -> BoundingBox:
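
For reference, a minimal usage sketch of the resize path touched above. It uses only names visible in these hunks (BoundingBox, format="xyxy", spatial_size, size, max_size) and assumes the torchvision.prototype.features API as it stood around this commit; it is an illustration, not part of the change.

# Minimal sketch; assumes the prototype features API at the time of this commit.
import torch
from torchvision.prototype import features

# One box in "xyxy" format on a (height, width) = (100, 200) image.
box = features.BoundingBox(
    torch.tensor([[10, 20, 50, 80]]),
    format="xyxy",
    spatial_size=(100, 200),
)

# resize() dispatches to self._F.resize_bounding_box(...) as shown in the hunk,
# scaling the coordinates and returning a BoundingBox with the new spatial_size.
resized = box.resize([200, 400])
print(resized.spatial_size)  # (200, 400)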
