Commit 55f7faf

Add api usage log to transforms (#5007)
* add api usage log to functional transforms
* add log to transforms
* fix for scriptability
* skip Compose for scriptability
* follow the new policy
* torchscriptability
* adopt new API
* make Compose scriptable
* move from __call__ to __init__
1 parent da7680f commit 55f7faf
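The change is mechanical: every public functional transform gains the same two-line guard at the top of its body. As a rough sketch only (not torchvision's exact code, which lives in torchvision/utils.py and may differ in detail), the helper imported as `_log_api_usage_once` can be thought of as forwarding a fully-qualified name to PyTorch's built-in usage logger:

import torch
from types import FunctionType
from typing import Any

# Illustrative sketch of the helper the diff imports from ..utils.
def _log_api_usage_once(obj: Any) -> None:
    # Functions log their own name; class instances log their class name.
    name = obj.__name__ if isinstance(obj, FunctionType) else obj.__class__.__name__
    # Hand a string key to PyTorch's internal API-usage logger.
    torch._C._log_api_usage_once(f"{obj.__module__}.{name}")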

File tree

2 files changed: +112 −0 lines

torchvision/transforms/functional.py

Lines changed: 71 additions & 0 deletions
@@ -14,6 +14,7 @@
 except ImportError:
     accimage = None

+from ..utils import _log_api_usage_once
 from . import functional_pil as F_pil
 from . import functional_tensor as F_t

@@ -67,6 +68,8 @@ def get_image_size(img: Tensor) -> List[int]:
     Returns:
         List[int]: The image size.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(get_image_size)
     if isinstance(img, torch.Tensor):
         return F_t.get_image_size(img)

@@ -82,6 +85,8 @@ def get_image_num_channels(img: Tensor) -> int:
     Returns:
         int: The number of channels.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(get_image_num_channels)
     if isinstance(img, torch.Tensor):
         return F_t.get_image_num_channels(img)

@@ -110,6 +115,8 @@ def to_tensor(pic):
     Returns:
         Tensor: Converted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(to_tensor)
     if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
         raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")

@@ -166,6 +173,8 @@ def pil_to_tensor(pic):
     Returns:
         Tensor: Converted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(pil_to_tensor)
     if not F_pil._is_pil_image(pic):
         raise TypeError(f"pic should be PIL Image. Got {type(pic)}")

@@ -205,6 +214,8 @@ def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -
         overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
         of the integer ``dtype``.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(convert_image_dtype)
     if not isinstance(image, torch.Tensor):
         raise TypeError("Input img should be Tensor Image")

@@ -225,6 +236,8 @@ def to_pil_image(pic, mode=None):
     Returns:
         PIL Image: Image converted to PIL Image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(to_pil_image)
     if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
         raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.")

@@ -322,6 +335,8 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool
     Returns:
         Tensor: Normalized Tensor image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(normalize)
     if not isinstance(tensor, torch.Tensor):
         raise TypeError(f"Input tensor should be a torch tensor. Got {type(tensor)}.")

@@ -401,6 +416,8 @@ def resize(
     Returns:
         PIL Image or Tensor: Resized image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(resize)
     # Backward compatibility with integer value
     if isinstance(interpolation, int):
         warnings.warn(
@@ -422,6 +439,8 @@ def resize(


 def scale(*args, **kwargs):
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(scale)
     warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.")
     return resize(*args, **kwargs)

@@ -467,6 +486,8 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con
     Returns:
         PIL Image or Tensor: Padded image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(pad)
     if not isinstance(img, torch.Tensor):
         return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

@@ -490,6 +511,8 @@ def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
         PIL Image or Tensor: Cropped image.
     """

+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(crop)
     if not isinstance(img, torch.Tensor):
         return F_pil.crop(img, top, left, height, width)

@@ -510,6 +533,8 @@ def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
     Returns:
         PIL Image or Tensor: Cropped image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(center_crop)
     if isinstance(output_size, numbers.Number):
         output_size = (int(output_size), int(output_size))
     elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
@@ -566,6 +591,8 @@ def resized_crop(
     Returns:
         PIL Image or Tensor: Cropped image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(resized_crop)
     img = crop(img, top, left, height, width)
     img = resize(img, size, interpolation)
     return img
@@ -583,6 +610,8 @@ def hflip(img: Tensor) -> Tensor:
     Returns:
         PIL Image or Tensor: Horizontally flipped image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(hflip)
     if not isinstance(img, torch.Tensor):
         return F_pil.hflip(img)

@@ -648,6 +677,8 @@ def perspective(
     Returns:
         PIL Image or Tensor: transformed Image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(perspective)

     coeffs = _get_perspective_coeffs(startpoints, endpoints)

@@ -681,6 +712,8 @@ def vflip(img: Tensor) -> Tensor:
     Returns:
         PIL Image or Tensor: Vertically flipped image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(vflip)
     if not isinstance(img, torch.Tensor):
         return F_pil.vflip(img)

@@ -706,6 +739,8 @@ def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Ten
         tuple: tuple (tl, tr, bl, br, center)
             Corresponding top left, top right, bottom left, bottom right and center crop.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(five_crop)
     if isinstance(size, numbers.Number):
         size = (int(size), int(size))
     elif isinstance(size, (tuple, list)) and len(size) == 1:
@@ -753,6 +788,8 @@ def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[
             Corresponding top left, top right, bottom left, bottom right and
             center crop and same for the flipped image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(ten_crop)
     if isinstance(size, numbers.Number):
         size = (int(size), int(size))
     elif isinstance(size, (tuple, list)) and len(size) == 1:
@@ -786,6 +823,8 @@ def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
     Returns:
         PIL Image or Tensor: Brightness adjusted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(adjust_brightness)
     if not isinstance(img, torch.Tensor):
         return F_pil.adjust_brightness(img, brightness_factor)

@@ -806,6 +845,8 @@ def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
     Returns:
         PIL Image or Tensor: Contrast adjusted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(adjust_contrast)
     if not isinstance(img, torch.Tensor):
         return F_pil.adjust_contrast(img, contrast_factor)

@@ -826,6 +867,8 @@ def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
     Returns:
         PIL Image or Tensor: Saturation adjusted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(adjust_saturation)
     if not isinstance(img, torch.Tensor):
         return F_pil.adjust_saturation(img, saturation_factor)

@@ -860,6 +903,8 @@ def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
     Returns:
         PIL Image or Tensor: Hue adjusted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(adjust_hue)
     if not isinstance(img, torch.Tensor):
         return F_pil.adjust_hue(img, hue_factor)

@@ -891,6 +936,8 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
     Returns:
         PIL Image or Tensor: Gamma correction adjusted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(adjust_gamma)
     if not isinstance(img, torch.Tensor):
         return F_pil.adjust_gamma(img, gamma, gain)

@@ -987,6 +1034,8 @@ def rotate(
     .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(rotate)
     if resample is not None:
         warnings.warn(
             "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
@@ -1067,6 +1116,8 @@ def affine(
     Returns:
         PIL Image or Tensor: Transformed image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(affine)
     if resample is not None:
         warnings.warn(
             "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
@@ -1151,6 +1202,8 @@ def to_grayscale(img, num_output_channels=1):
         - if num_output_channels = 1 : returned image is single channel
         - if num_output_channels = 3 : returned image is 3 channel with r = g = b
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(to_grayscale)
     if isinstance(img, Image.Image):
         return F_pil.to_grayscale(img, num_output_channels)

@@ -1176,6 +1229,8 @@ def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
         - if num_output_channels = 1 : returned image is single channel
         - if num_output_channels = 3 : returned image is 3 channel with r = g = b
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(rgb_to_grayscale)
     if not isinstance(img, torch.Tensor):
         return F_pil.to_grayscale(img, num_output_channels)

@@ -1198,6 +1253,8 @@ def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool
     Returns:
         Tensor Image: Erased image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(erase)
     if not isinstance(img, torch.Tensor):
         raise TypeError(f"img should be Tensor Image. Got {type(img)}")

@@ -1234,6 +1291,8 @@ def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[floa
     Returns:
         PIL Image or Tensor: Gaussian Blurred version of the image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(gaussian_blur)
     if not isinstance(kernel_size, (int, list, tuple)):
         raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}")
     if isinstance(kernel_size, int):
@@ -1285,6 +1344,8 @@ def invert(img: Tensor) -> Tensor:
     Returns:
         PIL Image or Tensor: Color inverted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(invert)
     if not isinstance(img, torch.Tensor):
         return F_pil.invert(img)

@@ -1304,6 +1365,8 @@ def posterize(img: Tensor, bits: int) -> Tensor:
     Returns:
         PIL Image or Tensor: Posterized image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(posterize)
     if not (0 <= bits <= 8):
         raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}")

@@ -1325,6 +1388,8 @@ def solarize(img: Tensor, threshold: float) -> Tensor:
     Returns:
         PIL Image or Tensor: Solarized image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(solarize)
     if not isinstance(img, torch.Tensor):
         return F_pil.solarize(img, threshold)

@@ -1345,6 +1410,8 @@ def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
     Returns:
         PIL Image or Tensor: Sharpness adjusted image.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(adjust_sharpness)
     if not isinstance(img, torch.Tensor):
         return F_pil.adjust_sharpness(img, sharpness_factor)

@@ -1365,6 +1432,8 @@ def autocontrast(img: Tensor) -> Tensor:
     Returns:
         PIL Image or Tensor: An image that was autocontrasted.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(autocontrast)
     if not isinstance(img, torch.Tensor):
         return F_pil.autocontrast(img)

@@ -1386,6 +1455,8 @@ def equalize(img: Tensor) -> Tensor:
     Returns:
         PIL Image or Tensor: An image that was equalized.
     """
+    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
+        _log_api_usage_once(equalize)
     if not isinstance(img, torch.Tensor):
         return F_pil.equalize(img)

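A note on why the guard, rather than a bare logging call, keeps these functions scriptable: torch.jit.is_scripting() is special-cased by the TorchScript compiler, so the logging branch is pruned from compiled code and the Python-only helper never has to compile; torch.jit.is_tracing() likewise keeps the call out of traced graphs. A minimal sketch (the function hflip_like is hypothetical, not from the commit):

import torch

def hflip_like(img: torch.Tensor) -> torch.Tensor:
    # In eager mode both checks are False, so the side effect fires; under
    # torch.jit.script the branch is dropped at compile time, and under
    # torch.jit.trace is_tracing() is True, so it is skipped as well.
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        print("eager-only side effect, e.g. _log_api_usage_once(hflip_like)")
    return img.flip(-1)

scripted = torch.jit.script(hflip_like)  # compiles cleanly; no print in the graph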
0 commit comments
