From b2f6615a59c1047a91affef2ed893d38839c7e27 Mon Sep 17 00:00:00 2001
From: Aditya Oke
Date: Thu, 20 May 2021 12:47:27 +0530
Subject: [PATCH 1/3] fix

---
 docs/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/Makefile b/docs/Makefile
index 58daa471f5c..ef4cdc0cee0 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -2,7 +2,7 @@
 #
 # You can set these variables from the command line.
-SPHINXOPTS = -W # turn warnings into errors
+SPHINXOPTS =    # turn warnings into errors
 SPHINXBUILD = sphinx-build
 SPHINXPROJ = torchvision
 SOURCEDIR = source

From 84163347f09f674d03cdbeac3dcd2ac61592ac0a Mon Sep 17 00:00:00 2001
From: Aditya Oke
Date: Sat, 31 Jul 2021 11:29:40 +0530
Subject: [PATCH 2/3] add typings to functional_tensor

---
 torchvision/transforms/functional_tensor.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py
index 5a13bd5d392..1f674fd10f8 100644
--- a/torchvision/transforms/functional_tensor.py
+++ b/torchvision/transforms/functional_tensor.py
@@ -4,14 +4,14 @@
 from torch import Tensor
 from torch.nn.functional import grid_sample, conv2d, interpolate, pad as torch_pad
 from torch.jit.annotations import BroadcastingList2
-from typing import Optional, Tuple, List
+from typing import Any, Optional, Tuple, List


 def _is_tensor_a_torch_image(x: Tensor) -> bool:
     return x.ndim >= 2


-def _assert_image_tensor(img):
+def _assert_image_tensor(img: Any) -> None:
     if not _is_tensor_a_torch_image(img):
         raise TypeError("Tensor is not a torch image.")

@@ -317,7 +317,7 @@ def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:
     return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype)


-def _rgb2hsv(img):
+def _rgb2hsv(img: Tensor) -> Tensor:
     r, g, b = img.unbind(dim=-3)

     # Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/
@@ -356,7 +356,7 @@
     return torch.stack((h, s, maxc), dim=-3)


-def _hsv2rgb(img):
+def _hsv2rgb(img: Tensor) -> Tensor:
     h, s, v = img.unbind(dim=-3)
     i = torch.floor(h * 6.0)
     f = (h * 6.0) - i
@@ -566,7 +566,7 @@ def _assert_grid_transform_inputs(
     fill: Optional[List[float]],
     supported_interpolation_modes: List[str],
     coeffs: Optional[List[float]] = None,
-):
+) -> None:
     if not (isinstance(img, torch.Tensor)):
         raise TypeError("Input img should be Tensor")

@@ -612,7 +612,7 @@ def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor
     return img, need_cast, need_squeeze, out_dtype


-def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype):
+def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype) -> Tensor:
     if need_squeeze:
         img = img.squeeze(dim=0)

@@ -732,7 +732,7 @@ def rotate(
     return _apply_grid_transform(img, grid, interpolation, fill=fill)


-def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device):
+def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> Tensor:
     # https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
     # src/libImaging/Geometry.c#L394

@@ -922,7 +922,7 @@ def autocontrast(img: Tensor) -> Tensor:
     return ((img - minimum) * scale).clamp(0, bound).to(img.dtype)


-def _scale_channel(img_chan):
+def _scale_channel(img_chan: Tensor) -> Tensor:
     # TODO: we should expect bincount to always be faster than histc, but this
     # isn't always the case. Once
     # https://github.com/pytorch/pytorch/issues/53194 is fixed, remove the if

From 8f81ff2f0eec59d7edf0ab5c0fc9ee40883f4a89 Mon Sep 17 00:00:00 2001
From: Aditya Oke
Date: Sat, 31 Jul 2021 11:37:37 +0530
Subject: [PATCH 3/3] fix ci

---
 torchvision/transforms/functional_tensor.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py
index 1f674fd10f8..0a11cdae815 100644
--- a/torchvision/transforms/functional_tensor.py
+++ b/torchvision/transforms/functional_tensor.py
@@ -4,14 +4,14 @@
 from torch import Tensor
 from torch.nn.functional import grid_sample, conv2d, interpolate, pad as torch_pad
 from torch.jit.annotations import BroadcastingList2
-from typing import Any, Optional, Tuple, List
+from typing import Optional, Tuple, List


 def _is_tensor_a_torch_image(x: Tensor) -> bool:
     return x.ndim >= 2


-def _assert_image_tensor(img: Any) -> None:
+def _assert_image_tensor(img: Tensor) -> None:
     if not _is_tensor_a_torch_image(img):
         raise TypeError("Tensor is not a torch image.")