Skip to content

Commit 1ca922c

Browse files
yiwen-song, fmassa, and datumbox
authored and committed
[fbsync] Added missing typing annotations to transforms/functional_tensor (#4236)
Summary: * style: Added missing typing annotations * style: Fixed typing * style: Fixed typing * chore: Updated mypy.ini Reviewed By: NicolasHug Differential Revision: D30417193 fbshipit-source-id: 0d0b75c78513e86bd62d13e717d14086fba0916f Co-authored-by: Francisco Massa <[email protected]> Co-authored-by: Vasilis Vryniotis <[email protected]>
1 parent e4eb6cf commit 1ca922c

File tree

2 files changed

+26
-18
lines changed

2 files changed

+26
-18
lines changed

mypy.ini

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,15 @@ ignore_errors = True
2828

2929
ignore_errors = True
3030

31-
[mypy-torchvision.transforms.*]
31+
[mypy-torchvision.transforms.functional.*]
32+
33+
ignore_errors = True
34+
35+
[mypy-torchvision.transforms.transforms.*]
36+
37+
ignore_errors = True
38+
39+
[mypy-torchvision.transforms.autoaugment.*]
3240

3341
ignore_errors = True
3442

torchvision/transforms/functional_tensor.py

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ def _is_tensor_a_torch_image(x: Tensor) -> bool:
1111
return x.ndim >= 2
1212

1313

14-
def _assert_image_tensor(img):
14+
def _assert_image_tensor(img: Tensor) -> None:
1515
if not _is_tensor_a_torch_image(img):
1616
raise TypeError("Tensor is not a torch image.")
1717

@@ -317,7 +317,7 @@ def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:
317317
return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype)
318318

319319

320-
def _rgb2hsv(img):
320+
def _rgb2hsv(img: Tensor) -> Tensor:
321321
r, g, b = img.unbind(dim=-3)
322322

323323
# Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/
@@ -356,7 +356,7 @@ def _rgb2hsv(img):
356356
return torch.stack((h, s, maxc), dim=-3)
357357

358358

359-
def _hsv2rgb(img):
359+
def _hsv2rgb(img: Tensor) -> Tensor:
360360
h, s, v = img.unbind(dim=-3)
361361
i = torch.floor(h * 6.0)
362362
f = (h * 6.0) - i
@@ -388,15 +388,15 @@ def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:
388388

389389
in_sizes = img.size()
390390

391-
x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...]
391+
_x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...]
392392
left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0]
393393
right_indices = [-(i + 1) for i in range(padding[1])] # e.g. [-1, -2, -3]
394-
x_indices = torch.tensor(left_indices + x_indices + right_indices, device=img.device)
394+
x_indices = torch.tensor(left_indices + _x_indices + right_indices, device=img.device)
395395

396-
y_indices = [i for i in range(in_sizes[-2])]
396+
_y_indices = [i for i in range(in_sizes[-2])]
397397
top_indices = [i for i in range(padding[2] - 1, -1, -1)]
398398
bottom_indices = [-(i + 1) for i in range(padding[3])]
399-
y_indices = torch.tensor(top_indices + y_indices + bottom_indices, device=img.device)
399+
y_indices = torch.tensor(top_indices + _y_indices + bottom_indices, device=img.device)
400400

401401
ndim = img.ndim
402402
if ndim == 3:
@@ -560,13 +560,13 @@ def resize(
560560

561561

562562
def _assert_grid_transform_inputs(
563-
img: Tensor,
564-
matrix: Optional[List[float]],
565-
interpolation: str,
566-
fill: Optional[List[float]],
567-
supported_interpolation_modes: List[str],
568-
coeffs: Optional[List[float]] = None,
569-
):
563+
img: Tensor,
564+
matrix: Optional[List[float]],
565+
interpolation: str,
566+
fill: Optional[List[float]],
567+
supported_interpolation_modes: List[str],
568+
coeffs: Optional[List[float]] = None,
569+
) -> None:
570570

571571
if not (isinstance(img, torch.Tensor)):
572572
raise TypeError("Input img should be Tensor")
@@ -612,7 +612,7 @@ def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor
612612
return img, need_cast, need_squeeze, out_dtype
613613

614614

615-
def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype):
615+
def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype) -> Tensor:
616616
if need_squeeze:
617617
img = img.squeeze(dim=0)
618618

@@ -732,7 +732,7 @@ def rotate(
732732
return _apply_grid_transform(img, grid, interpolation, fill=fill)
733733

734734

735-
def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device):
735+
def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> Tensor:
736736
# https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
737737
# src/libImaging/Geometry.c#L394
738738

@@ -922,7 +922,7 @@ def autocontrast(img: Tensor) -> Tensor:
922922
return ((img - minimum) * scale).clamp(0, bound).to(img.dtype)
923923

924924

925-
def _scale_channel(img_chan):
925+
def _scale_channel(img_chan: Tensor) -> Tensor:
926926
# TODO: we should expect bincount to always be faster than histc, but this
927927
# isn't always the case. Once
928928
# https://github.com/pytorch/pytorch/issues/53194 is fixed, remove the if

0 commit comments

Comments (0)