diff --git a/torchvision/datasets/vision.py b/torchvision/datasets/vision.py
index 22fc85322e4..dad19f65197 100644
--- a/torchvision/datasets/vision.py
+++ b/torchvision/datasets/vision.py
@@ -28,6 +28,7 @@ class VisionDataset(data.Dataset):

     _repr_indent = 4

+    @_log_api_usage_once
     def __init__(
         self,
         root: str,
@@ -35,7 +36,6 @@ def __init__(
         transform: Optional[Callable] = None,
         target_transform: Optional[Callable] = None,
     ) -> None:
-        _log_api_usage_once(self)
         if isinstance(root, torch._six.string_classes):
             root = os.path.expanduser(root)
         self.root = root
diff --git a/torchvision/models/alexnet.py b/torchvision/models/alexnet.py
index bb812febdc4..2a6e2ccd7fa 100644
--- a/torchvision/models/alexnet.py
+++ b/torchvision/models/alexnet.py
@@ -16,9 +16,9 @@


 class AlexNet(nn.Module):
+    @_log_api_usage_once
     def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
         super().__init__()
-        _log_api_usage_once(self)
         self.features = nn.Sequential(
             nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
             nn.ReLU(inplace=True),
diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py
index 14e318360af..96e5b3d1fb8 100644
--- a/torchvision/models/densenet.py
+++ b/torchvision/models/densenet.py
@@ -151,6 +151,7 @@ class DenseNet(nn.Module):
           but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
     """

+    @_log_api_usage_once
     def __init__(
         self,
         growth_rate: int = 32,
@@ -161,9 +162,7 @@ def __init__(
         num_classes: int = 1000,
         memory_efficient: bool = False,
     ) -> None:
-
         super().__init__()
-        _log_api_usage_once(self)
         # First convolution
         self.features = nn.Sequential(
diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
index 37ef1820d71..53a0402f870 100644
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -25,9 +25,9 @@ class GeneralizedRCNN(nn.Module):
             the model
     """

+    @_log_api_usage_once
     def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, transform: nn.Module) -> None:
         super().__init__()
-        _log_api_usage_once(self)
         self.transform = transform
         self.backbone = backbone
         self.rpn = rpn
diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py
index e5ced9870ba..fa0eb4f4da1 100644
--- a/torchvision/models/detection/retinanet.py
+++ b/torchvision/models/detection/retinanet.py
@@ -316,6 +316,7 @@ class RetinaNet(nn.Module):
         "proposal_matcher": det_utils.Matcher,
     }

+    @_log_api_usage_once
     def __init__(
         self,
         backbone,
@@ -337,7 +338,6 @@ def __init__(
         topk_candidates=1000,
     ):
         super().__init__()
-        _log_api_usage_once(self)

         if not hasattr(backbone, "out_channels"):
             raise ValueError(
diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py
index 5778a07075d..d14119777e2 100644
--- a/torchvision/models/detection/ssd.py
+++ b/torchvision/models/detection/ssd.py
@@ -165,6 +165,7 @@ class SSD(nn.Module):
         "proposal_matcher": det_utils.Matcher,
     }

+    @_log_api_usage_once
     def __init__(
         self,
         backbone: nn.Module,
@@ -182,7 +183,6 @@ def __init__(
         positive_fraction: float = 0.25,
     ):
         super().__init__()
-        _log_api_usage_once(self)

         self.backbone = backbone
diff --git a/torchvision/models/detection/ssdlite.py b/torchvision/models/detection/ssdlite.py
index d35dfbf78d3..b44fa223ccc 100644
--- a/torchvision/models/detection/ssdlite.py
+++ b/torchvision/models/detection/ssdlite.py
@@ -111,6 +111,7 @@ def __init__(self, in_channels: List[int], num_anchors: List[int], norm_layer: C

 class SSDLiteFeatureExtractorMobileNet(nn.Module):
+    @_log_api_usage_once
     def __init__(
         self,
         backbone: nn.Module,
@@ -120,7 +121,6 @@ def __init__(
         min_depth: int = 16,
     ):
         super().__init__()
-        _log_api_usage_once(self)

         assert not backbone[c4_pos].use_res_connect
         self.features = nn.Sequential(
diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py
index 6837018c09e..e1264a93d81 100644
--- a/torchvision/models/efficientnet.py
+++ b/torchvision/models/efficientnet.py
@@ -148,6 +148,7 @@ def forward(self, input: Tensor) -> Tensor:

 class EfficientNet(nn.Module):
+    @_log_api_usage_once
     def __init__(
         self,
         inverted_residual_setting: List[MBConvConfig],
@@ -170,7 +171,6 @@ def __init__(
             norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
         """
         super().__init__()
-        _log_api_usage_once(self)

         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
diff --git a/torchvision/models/googlenet.py b/torchvision/models/googlenet.py
index 5e6375c1191..ebba69d2de7 100644
--- a/torchvision/models/googlenet.py
+++ b/torchvision/models/googlenet.py
@@ -28,6 +28,7 @@ class GoogLeNet(nn.Module):

     __constants__ = ["aux_logits", "transform_input"]

+    @_log_api_usage_once
     def __init__(
         self,
         num_classes: int = 1000,
@@ -39,7 +40,6 @@ def __init__(
         dropout_aux: float = 0.7,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
         if blocks is None:
             blocks = [BasicConv2d, Inception, InceptionAux]
         if init_weights is None:
diff --git a/torchvision/models/inception.py b/torchvision/models/inception.py
index 322c2370bdd..e48fe627302 100644
--- a/torchvision/models/inception.py
+++ b/torchvision/models/inception.py
@@ -27,6 +27,7 @@


 class Inception3(nn.Module):
+    @_log_api_usage_once
     def __init__(
         self,
         num_classes: int = 1000,
@@ -37,7 +38,6 @@ def __init__(
         dropout: float = 0.5,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
         if inception_blocks is None:
             inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
         if init_weights is None:
diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py
index 5eb27904f90..58de872c2e7 100644
--- a/torchvision/models/mnasnet.py
+++ b/torchvision/models/mnasnet.py
@@ -96,9 +96,9 @@ class MNASNet(torch.nn.Module):
     # Version 2 adds depth scaling in the initial stages of the network.
     _version = 2

+    @_log_api_usage_once
     def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
         super().__init__()
-        _log_api_usage_once(self)
         assert alpha > 0.0
         self.alpha = alpha
         self.num_classes = num_classes
diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py
index 1a470953df5..38030870f21 100644
--- a/torchvision/models/mobilenetv2.py
+++ b/torchvision/models/mobilenetv2.py
@@ -86,6 +86,7 @@ def forward(self, x: Tensor) -> Tensor:

 class MobileNetV2(nn.Module):
+    @_log_api_usage_once
     def __init__(
         self,
         num_classes: int = 1000,
@@ -111,7 +112,6 @@ def __init__(

         """
         super().__init__()
-        _log_api_usage_once(self)

         if block is None:
             block = InvertedResidual
diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py
index 97239bea8ad..87f0a15efcf 100644
--- a/torchvision/models/mobilenetv3.py
+++ b/torchvision/models/mobilenetv3.py
@@ -129,6 +129,7 @@ def forward(self, input: Tensor) -> Tensor:

 class MobileNetV3(nn.Module):
+    @_log_api_usage_once
     def __init__(
         self,
         inverted_residual_setting: List[InvertedResidualConfig],
@@ -151,7 +152,6 @@ def __init__(
             dropout (float): The droupout probability
         """
         super().__init__()
-        _log_api_usage_once(self)

         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
diff --git a/torchvision/models/regnet.py b/torchvision/models/regnet.py
index 85f53751dd0..c38765c841b 100644
--- a/torchvision/models/regnet.py
+++ b/torchvision/models/regnet.py
@@ -299,6 +299,7 @@ def _adjust_widths_groups_compatibilty(

 class RegNet(nn.Module):
+    @_log_api_usage_once
     def __init__(
         self,
         block_params: BlockParams,
@@ -310,7 +311,6 @@ def __init__(
         activation: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)

         if stem_type is None:
             stem_type = SimpleStemIN
diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py
index b0bb8d13ade..dfa8090a463 100644
--- a/torchvision/models/resnet.py
+++ b/torchvision/models/resnet.py
@@ -162,6 +162,7 @@ def forward(self, x: Tensor) -> Tensor:

 class ResNet(nn.Module):
+    @_log_api_usage_once
     def __init__(
         self,
         block: Type[Union[BasicBlock, Bottleneck]],
@@ -174,7 +175,6 @@ def __init__(
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         self._norm_layer = norm_layer
diff --git a/torchvision/models/segmentation/_utils.py b/torchvision/models/segmentation/_utils.py
index 0bbea5d3e81..b0c0dd28716 100644
--- a/torchvision/models/segmentation/_utils.py
+++ b/torchvision/models/segmentation/_utils.py
@@ -11,9 +11,9 @@ class _SimpleSegmentationModel(nn.Module):
     __constants__ = ["aux_classifier"]

+    @_log_api_usage_once
     def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None:
         super().__init__()
-        _log_api_usage_once(self)
         self.backbone = backbone
         self.classifier = classifier
         self.aux_classifier = aux_classifier
diff --git a/torchvision/models/segmentation/lraspp.py b/torchvision/models/segmentation/lraspp.py
index f6c2583cac1..9b32264324b 100644
--- a/torchvision/models/segmentation/lraspp.py
+++ b/torchvision/models/segmentation/lraspp.py
@@ -34,11 +34,11 @@ class LRASPP(nn.Module):
         inter_channels (int, optional): the number of channels for intermediate computations.
""" + @_log_api_usage_once def __init__( self, backbone: nn.Module, low_channels: int, high_channels: int, num_classes: int, inter_channels: int = 128 ) -> None: super().__init__() - _log_api_usage_once(self) self.backbone = backbone self.classifier = LRASPPHead(low_channels, high_channels, num_classes, inter_channels) diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py index f3758c54aaf..166eaddec06 100644 --- a/torchvision/models/shufflenetv2.py +++ b/torchvision/models/shufflenetv2.py @@ -92,6 +92,7 @@ def forward(self, x: Tensor) -> Tensor: class ShuffleNetV2(nn.Module): + @_log_api_usage_once def __init__( self, stages_repeats: List[int], @@ -100,7 +101,6 @@ def __init__( inverted_residual: Callable[..., nn.Module] = InvertedResidual, ) -> None: super().__init__() - _log_api_usage_once(self) if len(stages_repeats) != 3: raise ValueError("expected stages_repeats as list of 3 positive ints") diff --git a/torchvision/models/squeezenet.py b/torchvision/models/squeezenet.py index 2c1a30f225d..32be85e5020 100644 --- a/torchvision/models/squeezenet.py +++ b/torchvision/models/squeezenet.py @@ -34,9 +34,9 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class SqueezeNet(nn.Module): + @_log_api_usage_once def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None: super().__init__() - _log_api_usage_once(self) self.num_classes = num_classes if version == "1_0": self.features = nn.Sequential( diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py index e31fc542ca6..f9cb78fc71f 100644 --- a/torchvision/models/vgg.py +++ b/torchvision/models/vgg.py @@ -33,11 +33,11 @@ class VGG(nn.Module): + @_log_api_usage_once def __init__( self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5 ) -> None: super().__init__() - _log_api_usage_once(self) self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py index f6899cfafeb..8ca4cd6825a 100644 --- a/torchvision/models/video/resnet.py +++ b/torchvision/models/video/resnet.py @@ -188,6 +188,7 @@ def __init__(self) -> None: class VideoResNet(nn.Module): + @_log_api_usage_once def __init__( self, block: Type[Union[BasicBlock, Bottleneck]], @@ -209,7 +210,6 @@ def __init__( zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False. 
""" super().__init__() - _log_api_usage_once(self) self.inplanes = 64 self.stem = stem() diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py index 10a03a907e8..3bf61c02085 100644 --- a/torchvision/ops/boxes.py +++ b/torchvision/ops/boxes.py @@ -9,6 +9,7 @@ from ._box_convert import _box_cxcywh_to_xyxy, _box_xyxy_to_cxcywh, _box_xywh_to_xyxy, _box_xyxy_to_xywh +@_log_api_usage_once def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: """ Performs non-maximum suppression (NMS) on the boxes according @@ -34,11 +35,11 @@ def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted in decreasing order of scores """ - _log_api_usage_once("torchvision.ops.nms") _assert_has_ops() return torch.ops.torchvision.nms(boxes, scores, iou_threshold) +@_log_api_usage_once def batched_nms( boxes: Tensor, scores: Tensor, @@ -63,7 +64,6 @@ def batched_nms( Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted in decreasing order of scores """ - _log_api_usage_once("torchvision.ops.batched_nms") # Benchmarks that drove the following thresholds are at # https://github.com/pytorch/vision/issues/1311#issuecomment-781329339 # Ideally for GPU we'd use a higher threshold @@ -110,6 +110,7 @@ def _batched_nms_vanilla( return keep_indices[scores[keep_indices].sort(descending=True)[1]] +@_log_api_usage_once def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor: """ Remove boxes which contains at least one side smaller than min_size. @@ -123,13 +124,13 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor: Tensor[K]: indices of the boxes that have both sides larger than min_size """ - _log_api_usage_once("torchvision.ops.remove_small_boxes") ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1] keep = (ws >= min_size) & (hs >= min_size) keep = torch.where(keep)[0] return keep +@_log_api_usage_once def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor: """ Clip boxes so that they lie inside an image of size `size`. @@ -142,7 +143,6 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor: Returns: Tensor[N, 4]: clipped boxes """ - _log_api_usage_once("torchvision.ops.clip_boxes_to_image") dim = boxes.dim() boxes_x = boxes[..., 0::2] boxes_y = boxes[..., 1::2] @@ -161,6 +161,7 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor: return clipped_boxes.reshape(boxes.shape) +@_log_api_usage_once def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor: """ Converts boxes from given in_fmt to out_fmt. @@ -183,7 +184,6 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor: Tensor[N, 4]: Boxes into converted format. 
""" - _log_api_usage_once("torchvision.ops.box_convert") allowed_fmts = ("xyxy", "xywh", "cxcywh") if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts: raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt") @@ -220,6 +220,7 @@ def _upcast(t: Tensor) -> Tensor: return t if t.dtype in (torch.int32, torch.int64) else t.int() +@_log_api_usage_once def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by their @@ -233,7 +234,6 @@ def box_area(boxes: Tensor) -> Tensor: Returns: Tensor[N]: the area for each box """ - _log_api_usage_once("torchvision.ops.box_area") boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) @@ -255,6 +255,7 @@ def _box_inter_union(boxes1: Tensor, boxes2: Tensor) -> Tuple[Tensor, Tensor]: return inter, union +@_log_api_usage_once def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: """ Return intersection-over-union (Jaccard index) between two sets of boxes. @@ -269,13 +270,13 @@ def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: Returns: Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ - _log_api_usage_once("torchvision.ops.box_iou") inter, union = _box_inter_union(boxes1, boxes2) iou = inter / union return iou # Implementation adapted from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py +@_log_api_usage_once def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: """ Return generalized intersection-over-union (Jaccard index) between two sets of boxes. @@ -292,7 +293,6 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: for every element in boxes1 and boxes2 """ - _log_api_usage_once("torchvision.ops.generalized_box_iou") # degenerate boxes gives inf / nan results # so do an early check assert (boxes1[:, 2:] >= boxes1[:, :2]).all() @@ -310,6 +310,7 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: return iou - (areai - union) / areai +@_log_api_usage_once def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor: """ Compute the bounding boxes around the provided masks. 
@@ -324,7 +325,6 @@ def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:
     Returns:
         Tensor[N, 4]: bounding boxes
     """
-    _log_api_usage_once("torchvision.ops.masks_to_boxes")
     if masks.numel() == 0:
         return torch.zeros((0, 4), device=masks.device, dtype=torch.float)
diff --git a/torchvision/ops/deform_conv.py b/torchvision/ops/deform_conv.py
index 59b1cec049e..55b7ebba075 100644
--- a/torchvision/ops/deform_conv.py
+++ b/torchvision/ops/deform_conv.py
@@ -11,6 +11,7 @@
 from ..utils import _log_api_usage_once


+@_log_api_usage_once
 def deform_conv2d(
     input: Tensor,
     offset: Tensor,
@@ -61,7 +62,6 @@ def deform_conv2d(
         >>> torch.Size([4, 5, 8, 8])
     """

-    _log_api_usage_once("torchvision.ops.deform_conv2d")
     _assert_has_ops()

     out_channels = weight.shape[0]
diff --git a/torchvision/ops/feature_pyramid_network.py b/torchvision/ops/feature_pyramid_network.py
index 93caa47d04b..71a371d646f 100644
--- a/torchvision/ops/feature_pyramid_network.py
+++ b/torchvision/ops/feature_pyramid_network.py
@@ -70,6 +70,7 @@ class FeaturePyramidNetwork(nn.Module):

     """

+    @_log_api_usage_once
     def __init__(
         self,
         in_channels_list: List[int],
@@ -77,7 +78,6 @@ def __init__(
         extra_blocks: Optional[ExtraFPNBlock] = None,
     ):
         super().__init__()
-        _log_api_usage_once(self)
         self.inner_blocks = nn.ModuleList()
         self.layer_blocks = nn.ModuleList()
         for in_channels in in_channels_list:
diff --git a/torchvision/ops/focal_loss.py b/torchvision/ops/focal_loss.py
index 1a149ed4120..852ec218539 100644
--- a/torchvision/ops/focal_loss.py
+++ b/torchvision/ops/focal_loss.py
@@ -4,6 +4,7 @@
 from ..utils import _log_api_usage_once


+@_log_api_usage_once
 def sigmoid_focal_loss(
     inputs: torch.Tensor,
     targets: torch.Tensor,
@@ -32,7 +33,6 @@ def sigmoid_focal_loss(
     Returns:
         Loss tensor with the reduction option applied.
     """
-    _log_api_usage_once("torchvision.ops.sigmoid_focal_loss")
     p = torch.sigmoid(inputs)
     ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
     p_t = p * targets + (1 - p) * (1 - targets)
diff --git a/torchvision/ops/misc.py b/torchvision/ops/misc.py
index fac9a3570d6..17c042c8914 100644
--- a/torchvision/ops/misc.py
+++ b/torchvision/ops/misc.py
@@ -50,6 +50,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
         eps (float): a value added to the denominator for numerical stability. Default: 1e-5
     """

+    @_log_api_usage_once
     def __init__(
         self,
         num_features: int,
@@ -61,7 +62,6 @@ def __init__(
             warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning)
             num_features = n
         super().__init__()
-        _log_api_usage_once(self)
         self.eps = eps
         self.register_buffer("weight", torch.ones(num_features))
         self.register_buffer("bias", torch.zeros(num_features))
@@ -119,6 +119,7 @@ class ConvNormActivation(torch.nn.Sequential):

     """

+    @_log_api_usage_once
     def __init__(
         self,
         in_channels: int,
@@ -151,7 +152,6 @@ def __init__(
         if activation_layer is not None:
             layers.append(activation_layer(inplace=inplace))
         super().__init__(*layers)
-        _log_api_usage_once(self)
         self.out_channels = out_channels


@@ -167,6 +167,7 @@ class SqueezeExcitation(torch.nn.Module):
         scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
     """

+    @_log_api_usage_once
     def __init__(
         self,
         input_channels: int,
@@ -175,7 +176,6 @@ def __init__(
         scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
     ) -> None:
         super().__init__()
-        _log_api_usage_once(self)
         self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
         self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
         self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py
index a0cd238dc75..e83160d1c38 100644
--- a/torchvision/ops/poolers.py
+++ b/torchvision/ops/poolers.py
@@ -144,6 +144,7 @@ class MultiScaleRoIAlign(nn.Module):

     __annotations__ = {"scales": Optional[List[float]], "map_levels": Optional[LevelMapper]}

+    @_log_api_usage_once
     def __init__(
         self,
         featmap_names: List[str],
@@ -154,7 +155,6 @@ def __init__(
         canonical_level: int = 4,
     ):
         super().__init__()
-        _log_api_usage_once(self)
         if isinstance(output_size, int):
             output_size = (output_size, output_size)
         self.featmap_names = featmap_names
diff --git a/torchvision/ops/ps_roi_align.py b/torchvision/ops/ps_roi_align.py
index 4ed4ead89ff..ed25254f06d 100644
--- a/torchvision/ops/ps_roi_align.py
+++ b/torchvision/ops/ps_roi_align.py
@@ -7,6 +7,7 @@
 from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape


+@_log_api_usage_once
 def ps_roi_align(
     input: Tensor,
     boxes: Tensor,
@@ -43,7 +44,6 @@ def ps_roi_align(
     Returns:
         Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs
     """
-    _log_api_usage_once("torchvision.ops.ps_roi_align")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes
diff --git a/torchvision/ops/ps_roi_pool.py b/torchvision/ops/ps_roi_pool.py
index 6bab125f04f..c6d1701a917 100644
--- a/torchvision/ops/ps_roi_pool.py
+++ b/torchvision/ops/ps_roi_pool.py
@@ -7,6 +7,7 @@
 from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape


+@_log_api_usage_once
 def ps_roi_pool(
     input: Tensor,
     boxes: Tensor,
@@ -37,7 +38,6 @@ def ps_roi_pool(
     Returns:
         Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
     """
-    _log_api_usage_once("torchvision.ops.ps_roi_pool")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes
diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py
index 3f80383855b..c5c48a216bc 100644
--- a/torchvision/ops/roi_align.py
+++ b/torchvision/ops/roi_align.py
@@ -10,6 +10,7 @@
 from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape


+@_log_api_usage_once
 def roi_align(
     input: Tensor,
     boxes: Union[Tensor, List[Tensor]],
@@ -50,7 +51,6 @@ def roi_align(
     Returns:
         Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
     """
-    _log_api_usage_once("torchvision.ops.roi_align")
     _assert_has_ops()
     check_roi_boxes_shape(boxes)
     rois = boxes
diff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py
index deef590c953..86134da64dd 100644
--- a/torchvision/ops/roi_pool.py
+++ b/torchvision/ops/roi_pool.py
@@ -10,6 +10,7 @@
 from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape


+@_log_api_usage_once
 def roi_pool(
     input: Tensor,
     boxes: Union[Tensor, List[Tensor]],
@@ -39,7 +40,6 @@ def roi_pool(
     Returns:
         Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
""" - _log_api_usage_once("torchvision.ops.roi_pool") _assert_has_ops() check_roi_boxes_shape(boxes) rois = boxes diff --git a/torchvision/ops/stochastic_depth.py b/torchvision/ops/stochastic_depth.py index b2a0aec233d..999b3c23c08 100644 --- a/torchvision/ops/stochastic_depth.py +++ b/torchvision/ops/stochastic_depth.py @@ -5,6 +5,7 @@ from ..utils import _log_api_usage_once +@_log_api_usage_once def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) -> Tensor: """ Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth" @@ -23,7 +24,6 @@ def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) Returns: Tensor[N, ...]: The randomly zeroed tensor. """ - _log_api_usage_once("torchvision.ops.stochastic_depth") if p < 0.0 or p > 1.0: raise ValueError(f"drop probability has to be between 0 and 1, but got {p}") if mode not in ["batch", "row"]: diff --git a/torchvision/utils.py b/torchvision/utils.py index b11f4ebeecf..bcad3066a49 100644 --- a/torchvision/utils.py +++ b/torchvision/utils.py @@ -1,7 +1,8 @@ import math import pathlib import warnings -from typing import Union, Optional, List, Tuple, BinaryIO, no_type_check +from functools import wraps +from typing import Union, Optional, List, Tuple, BinaryIO import numpy as np import torch @@ -375,13 +376,16 @@ def _generate_color_palette(num_masks: int): return [tuple((i * palette) % 255) for i in range(num_masks)] -@no_type_check -def _log_api_usage_once(obj: str) -> None: # type: ignore - if torch.jit.is_scripting() or torch.jit.is_tracing(): - return - # NOTE: obj can be an object as well, but mocking it here to be - # only a string to appease torchscript - if isinstance(obj, str): - torch._C._log_api_usage_once(obj) - else: - torch._C._log_api_usage_once(f"{obj.__module__}.{obj.__class__.__name__}") +def _log_api_usage_once(f): + @wraps(f) + def wrapper(*args, **kwargs): + event = f.__module__ + if f.__name__.endswith("__init__"): + # inside module instantiation + event += args[0].__class__.__name__ + else: + event += f.__name__ + torch._C._log_api_usage_once(event) + return f(*args, **kwargs) + + return wrapper