diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py
index 1c1d6555ce8..b50147debf7 100644
--- a/torchvision/models/detection/_utils.py
+++ b/torchvision/models/detection/_utils.py
@@ -20,7 +20,7 @@ class BalancedPositiveNegativeSampler(object):
     """
 
     def __init__(self, batch_size_per_image, positive_fraction):
-        # type: (int, float)
+        # type: (int, float) -> None
         """
         Arguments:
             batch_size_per_image (int): number of elements to be selected per image
@@ -30,7 +30,7 @@ def __init__(self, batch_size_per_image, positive_fraction):
         self.positive_fraction = positive_fraction
 
     def __call__(self, matched_idxs):
-        # type: (List[Tensor])
+        # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
         """
         Arguments:
             matched idxs: list of tensors containing -1, 0 or positive values.
@@ -139,7 +139,7 @@ class BoxCoder(object):
     """
 
     def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
-        # type: (Tuple[float, float, float, float], float)
+        # type: (Tuple[float, float, float, float], float) -> None
         """
         Arguments:
             weights (4-element tuple)
@@ -149,7 +149,7 @@ def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
         self.bbox_xform_clip = bbox_xform_clip
 
     def encode(self, reference_boxes, proposals):
-        # type: (List[Tensor], List[Tensor])
+        # type: (List[Tensor], List[Tensor]) -> List[Tensor]
         boxes_per_image = [len(b) for b in reference_boxes]
         reference_boxes = torch.cat(reference_boxes, dim=0)
         proposals = torch.cat(proposals, dim=0)
@@ -173,7 +173,7 @@ def encode_single(self, reference_boxes, proposals):
         return targets
 
     def decode(self, rel_codes, boxes):
-        # type: (Tensor, List[Tensor])
+        # type: (Tensor, List[Tensor]) -> Tensor
         assert isinstance(boxes, (list, tuple))
         assert isinstance(rel_codes, torch.Tensor)
         boxes_per_image = [b.size(0) for b in boxes]
@@ -251,7 +251,7 @@ class Matcher(object):
     }
 
     def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
-        # type: (float, float, bool)
+        # type: (float, float, bool) -> None
         """
         Args:
             high_threshold (float): quality values greater than or equal to
diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
index 16a1d2fb141..e58517c6a21 100644
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -42,7 +42,7 @@ def eager_outputs(self, losses, detections):
         return detections
 
     def forward(self, images, targets=None):
-        # type: (List[Tensor], Optional[List[Dict[str, Tensor]]])
+        # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
         """
         Arguments:
             images (list[Tensor]): images to be processed
diff --git a/torchvision/models/detection/image_list.py b/torchvision/models/detection/image_list.py
index 122b552752d..f667f6500fd 100644
--- a/torchvision/models/detection/image_list.py
+++ b/torchvision/models/detection/image_list.py
@@ -14,7 +14,7 @@ class ImageList(object):
     """
 
     def __init__(self, tensors, image_sizes):
-        # type: (Tensor, List[Tuple[int, int]])
+        # type: (Tensor, List[Tuple[int, int]]) -> None
        """
        Arguments:
            tensors (tensor)
@@ -24,6 +24,6 @@ def __init__(self, tensors, image_sizes):
         self.image_sizes = image_sizes
 
     def to(self, device):
-        # type: (Device)  # noqa
+        # type: (Device) -> ImageList  # noqa
         cast_tensor = self.tensors.to(device)
         return ImageList(cast_tensor, self.image_sizes)
diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py
index d3ef48c8470..de97adc08f0 100644
--- a/torchvision/models/detection/roi_heads.py
+++ b/torchvision/models/detection/roi_heads.py
@@ -15,7 +15,7 @@
 
 
 def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
-    # type: (Tensor, Tensor, List[Tensor], List[Tensor])
+    # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]
     """
     Computes the loss for Faster R-CNN.
 
@@ -55,7 +55,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
 
 
 def maskrcnn_inference(x, labels):
-    # type: (Tensor, List[Tensor])
+    # type: (Tensor, List[Tensor]) -> List[Tensor]
     """
     From the results of the CNN, post process the masks
     by taking the mask corresponding to the class with max
@@ -91,7 +91,7 @@ def maskrcnn_inference(x, labels):
 
 
 def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M):
-    # type: (Tensor, Tensor, Tensor, int)
+    # type: (Tensor, Tensor, Tensor, int) -> Tensor
     """
     Given segmentation masks and the bounding boxes corresponding
     to the location of the masks in the image, this function
@@ -106,7 +106,7 @@ def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M):
 
 
 def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs):
-    # type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor])
+    # type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor
     """
     Arguments:
         proposals (list[BoxList])
@@ -139,7 +139,7 @@ def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs
 
 
 def keypoints_to_heatmap(keypoints, rois, heatmap_size):
-    # type: (Tensor, Tensor, int)
+    # type: (Tensor, Tensor, int) -> Tuple[Tensor, Tensor]
     offset_x = rois[:, 0]
     offset_y = rois[:, 1]
     scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
@@ -283,7 +283,7 @@ def heatmaps_to_keypoints(maps, rois):
 
 
 def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched_idxs):
-    # type: (Tensor, List[Tensor], List[Tensor], List[Tensor])
+    # type: (Tensor, List[Tensor], List[Tensor], List[Tensor]) -> Tensor
     N, K, H, W = keypoint_logits.shape
     assert H == W
     discretization_size = H
@@ -313,7 +313,7 @@ def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched
 
 
 def keypointrcnn_inference(x, boxes):
-    # type: (Tensor, List[Tensor])
+    # type: (Tensor, List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
     kp_probs = []
     kp_scores = []
 
@@ -335,7 +335,7 @@ def keypointrcnn_inference(x, boxes):
 
 
 def _onnx_expand_boxes(boxes, scale):
-    # type: (Tensor, float)
+    # type: (Tensor, float) -> Tensor
     w_half = (boxes[:, 2] - boxes[:, 0]) * .5
     h_half = (boxes[:, 3] - boxes[:, 1]) * .5
     x_c = (boxes[:, 2] + boxes[:, 0]) * .5
@@ -356,7 +356,7 @@ def _onnx_expand_boxes(boxes, scale):
 # but are kept here for the moment while we need them
 # temporarily for paste_mask_in_image
 def expand_boxes(boxes, scale):
-    # type: (Tensor, float)
+    # type: (Tensor, float) -> Tensor
     if torchvision._is_tracing():
         return _onnx_expand_boxes(boxes, scale)
     w_half = (boxes[:, 2] - boxes[:, 0]) * .5
@@ -382,7 +382,7 @@ def expand_masks_tracing_scale(M, padding):
 
 
 def expand_masks(mask, padding):
-    # type: (Tensor, int)
+    # type: (Tensor, int) -> Tuple[Tensor, float]
     M = mask.shape[-1]
     if torch._C._get_tracing_state():  # could not import is_tracing(), not sure why
         scale = expand_masks_tracing_scale(M, padding)
@@ -393,7 +393,7 @@ def expand_masks(mask, padding):
 
 
 def paste_mask_in_image(mask, box, im_h, im_w):
-    # type: (Tensor, Tensor, int, int)
+    # type: (Tensor, Tensor, int, int) -> Tensor
     TO_REMOVE = 1
     w = int(box[2] - box[0] + TO_REMOVE)
     h = int(box[3] - box[1] + TO_REMOVE)
@@ -471,7 +471,7 @@ def _onnx_paste_masks_in_image_loop(masks, boxes, im_h, im_w):
 
 
 def paste_masks_in_image(masks, boxes, img_shape, padding=1):
-    # type: (Tensor, Tensor, Tuple[int, int], int)
+    # type: (Tensor, Tensor, Tuple[int, int], int) -> Tensor
     masks, scale = expand_masks(masks, padding=padding)
     boxes = expand_boxes(boxes, scale).to(dtype=torch.int64)
     im_h, im_w = img_shape
@@ -570,7 +570,7 @@ def has_keypoint(self):
         return True
 
     def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels):
-        # type: (List[Tensor], List[Tensor], List[Tensor])
+        # type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
         matched_idxs = []
         labels = []
         for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels):
@@ -607,7 +607,7 @@ def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels):
         return matched_idxs, labels
 
     def subsample(self, labels):
-        # type: (List[Tensor])
+        # type: (List[Tensor]) -> List[Tensor]
         sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
         sampled_inds = []
         for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
@@ -618,7 +618,7 @@ def subsample(self, labels):
         return sampled_inds
 
     def add_gt_proposals(self, proposals, gt_boxes):
-        # type: (List[Tensor], List[Tensor])
+        # type: (List[Tensor], List[Tensor]) -> List[Tensor]
         proposals = [
             torch.cat((proposal, gt_box))
             for proposal, gt_box in zip(proposals, gt_boxes)
@@ -627,22 +627,25 @@ def add_gt_proposals(self, proposals, gt_boxes):
         return proposals
 
     def DELTEME_all(self, the_list):
-        # type: (List[bool])
+        # type: (List[bool]) -> bool
        for i in the_list:
            if not i:
                return False
        return True
 
     def check_targets(self, targets):
-        # type: (Optional[List[Dict[str, Tensor]]])
+        # type: (Optional[List[Dict[str, Tensor]]]) -> None
         assert targets is not None
         assert self.DELTEME_all(["boxes" in t for t in targets])
         assert self.DELTEME_all(["labels" in t for t in targets])
         if self.has_mask():
             assert self.DELTEME_all(["masks" in t for t in targets])
 
-    def select_training_samples(self, proposals, targets):
-        # type: (List[Tensor], Optional[List[Dict[str, Tensor]]])
+    def select_training_samples(self,
+                                proposals,  # type: List[Tensor]
+                                targets     # type: Optional[List[Dict[str, Tensor]]]
+                                ):
+        # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]
         self.check_targets(targets)
         assert targets is not None
         dtype = proposals[0].dtype
@@ -674,8 +677,13 @@ def select_training_samples(self, proposals, targets):
         regression_targets = self.box_coder.encode(matched_gt_boxes, proposals)
         return proposals, matched_idxs, labels, regression_targets
 
-    def postprocess_detections(self, class_logits, box_regression, proposals, image_shapes):
-        # type: (Tensor, Tensor, List[Tensor], List[Tuple[int, int]])
+    def postprocess_detections(self,
+                               class_logits,    # type: Tensor
+                               box_regression,  # type: Tensor
+                               proposals,       # type: List[Tensor]
+                               image_shapes     # type: List[Tuple[int, int]]
+                               ):
+        # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]
         device = class_logits.device
         num_classes = class_logits.shape[-1]
 
@@ -727,8 +735,13 @@ def postprocess_detections(self, class_logits, box_regression, proposals, image_
 
         return all_boxes, all_scores, all_labels
 
-    def forward(self, features, proposals, image_shapes, targets=None):
-        # type: (Dict[str, Tensor], List[Tensor], List[Tuple[int, int]], Optional[List[Dict[str, Tensor]]])
+    def forward(self,
+                features,      # type: Dict[str, Tensor]
+                proposals,     # type: List[Tensor]
+                image_shapes,  # type: List[Tuple[int, int]]
+                targets=None   # type: Optional[List[Dict[str, Tensor]]]
+                ):
+        # type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
         """
         Arguments:
             features (List[Tensor])
diff --git a/torchvision/models/detection/rpn.py b/torchvision/models/detection/rpn.py
index c43c63d77b6..32a20e36f1d 100644
--- a/torchvision/models/detection/rpn.py
+++ b/torchvision/models/detection/rpn.py
@@ -77,7 +77,7 @@ def __init__(
     # (scales, aspect_ratios) are usually an element of zip(self.scales, self.aspect_ratios)
     # This method assumes aspect ratio = height / width for an anchor.
     def generate_anchors(self, scales, aspect_ratios, dtype=torch.float32, device="cpu"):
-        # type: (List[int], List[float], int, Device)  # noqa: F821
+        # type: (List[int], List[float], int, Device) -> Tensor  # noqa: F821
         scales = torch.as_tensor(scales, dtype=dtype, device=device)
         aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)
         h_ratios = torch.sqrt(aspect_ratios)
@@ -90,7 +90,7 @@ def generate_anchors(self, scales, aspect_ratios, dtype=torch.float32, device="c
         return base_anchors.round()
 
     def set_cell_anchors(self, dtype, device):
-        # type: (int, Device) -> None  # noqa: F821
+        # type: (int, Device) -> None  # noqa: F821
         if self.cell_anchors is not None:
             cell_anchors = self.cell_anchors
             assert cell_anchors is not None
@@ -116,7 +116,7 @@ def num_anchors_per_location(self):
     # For every combination of (a, (g, s), i) in (self.cell_anchors, zip(grid_sizes, strides), 0:2),
     # output g[i] anchors that are s[i] distance apart in direction i, with the same dimensions as a.
     def grid_anchors(self, grid_sizes, strides):
-        # type: (List[List[int]], List[List[Tensor]])
+        # type: (List[List[int]], List[List[Tensor]]) -> List[Tensor]
         anchors = []
         cell_anchors = self.cell_anchors
         assert cell_anchors is not None
@@ -149,7 +149,7 @@ def grid_anchors(self, grid_sizes, strides):
         return anchors
 
     def cached_grid_anchors(self, grid_sizes, strides):
-        # type: (List[List[int]], List[List[Tensor]])
+        # type: (List[List[int]], List[List[Tensor]]) -> List[Tensor]
         key = str(grid_sizes) + str(strides)
         if key in self._cache:
             return self._cache[key]
@@ -158,7 +158,7 @@ def cached_grid_anchors(self, grid_sizes, strides):
         return anchors
 
     def forward(self, image_list, feature_maps):
-        # type: (ImageList, List[Tensor])
+        # type: (ImageList, List[Tensor]) -> List[Tensor]
         grid_sizes = list([feature_map.shape[-2:] for feature_map in feature_maps])
         image_size = image_list.tensors.shape[-2:]
         dtype, device = feature_maps[0].dtype, feature_maps[0].device
@@ -202,7 +202,7 @@ def __init__(self, in_channels, num_anchors):
             torch.nn.init.constant_(l.bias, 0)
 
     def forward(self, x):
-        # type: (List[Tensor])
+        # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
         logits = []
         bbox_reg = []
         for feature in x:
@@ -213,7 +213,7 @@ def forward(self, x):
 
 
 def permute_and_flatten(layer, N, A, C, H, W):
-    # type: (Tensor, int, int, int, int, int)
+    # type: (Tensor, int, int, int, int, int) -> Tensor
     layer = layer.view(N, -1, C, H, W)
     layer = layer.permute(0, 3, 4, 1, 2)
     layer = layer.reshape(N, -1, C)
@@ -221,7 +221,7 @@ def permute_and_flatten(layer, N, A, C, H, W):
 
 
 def concat_box_prediction_layers(box_cls, box_regression):
-    # type: (List[Tensor], List[Tensor])
+    # type: (List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]
     box_cls_flattened = []
     box_regression_flattened = []
     # for each feature level, permute the outputs to make them be in the
@@ -327,7 +327,7 @@ def post_nms_top_n(self):
         return self._post_nms_top_n['testing']
 
     def assign_targets_to_anchors(self, anchors, targets):
-        # type: (List[Tensor], List[Dict[str, Tensor]])
+        # type: (List[Tensor], List[Dict[str, Tensor]]) -> Tuple[List[Tensor], List[Tensor]]
         labels = []
         matched_gt_boxes = []
         for anchors_per_image, targets_per_image in zip(anchors, targets):
@@ -363,7 +363,7 @@ def assign_targets_to_anchors(self, anchors, targets):
         return labels, matched_gt_boxes
 
     def _get_top_n_idx(self, objectness, num_anchors_per_level):
-        # type: (Tensor, List[int])
+        # type: (Tensor, List[int]) -> Tensor
        r = []
        offset = 0
        for ob in objectness.split(num_anchors_per_level, 1):
@@ -378,7 +378,7 @@ def _get_top_n_idx(self, objectness, num_anchors_per_level):
         return torch.cat(r, dim=1)
 
     def filter_proposals(self, proposals, objectness, image_shapes, num_anchors_per_level):
-        # type: (Tensor, Tensor, List[Tuple[int, int]], List[int])
+        # type: (Tensor, Tensor, List[Tuple[int, int]], List[int]) -> Tuple[List[Tensor], List[Tensor]]
         num_images = proposals.shape[0]
         device = proposals.device
         # do not backprop throught objectness
@@ -418,7 +418,7 @@ def filter_proposals(self, proposals, objectness, image_shapes, num_anchors_per_
         return final_boxes, final_scores
 
     def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets):
-        # type: (Tensor, Tensor, List[Tensor], List[Tensor])
+        # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]
         """
         Arguments:
             objectness (Tensor)
@@ -455,8 +455,12 @@ def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets)
 
         return objectness_loss, box_loss
 
-    def forward(self, images, features, targets=None):
-        # type: (ImageList, Dict[str, Tensor], Optional[List[Dict[str, Tensor]]])
+    def forward(self,
+                images,       # type: ImageList
+                features,     # type: Dict[str, Tensor]
+                targets=None  # type: Optional[List[Dict[str, Tensor]]]
+                ):
+        # type: (...) -> Tuple[List[Tensor], Dict[str, Tensor]]
         """
         Arguments:
             images (ImageList): images for which we want to compute the predictions
diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py
index 79bb9ed6fe7..33404142dc3 100644
--- a/torchvision/models/detection/transform.py
+++ b/torchvision/models/detection/transform.py
@@ -78,8 +78,11 @@ def __init__(self, min_size, max_size, image_mean, image_std):
         self.image_mean = image_mean
         self.image_std = image_std
 
-    def forward(self, images, targets=None):
-        # type: (List[Tensor], Optional[List[Dict[str, Tensor]]])
+    def forward(self,
+                images,       # type: List[Tensor]
+                targets=None  # type: Optional[List[Dict[str, Tensor]]]
+                ):
+        # type: (...) -> Tuple[ImageList, Optional[List[Dict[str, Tensor]]]]
         images = [img for img in images]
         for i in range(len(images)):
             image = images[i]
@@ -111,7 +114,7 @@ def normalize(self, image):
         return (image - mean[:, None, None]) / std[:, None, None]
 
     def torch_choice(self, l):
-        # type: (List[int])
+        # type: (List[int]) -> int
         """
         Implements `random.choice` via torch ops so it can be compiled with
         TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803
@@ -121,7 +124,7 @@ def torch_choice(self, l):
         return l[index]
 
     def resize(self, image, target):
-        # type: (Tensor, Optional[Dict[str, Tensor]])
+        # type: (Tensor, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
         h, w = image.shape[-2:]
         if self.training:
             size = float(self.torch_choice(self.min_size))
@@ -180,7 +183,7 @@ def max_by_axis(self, the_list):
         return maxes
 
     def batch_images(self, images, size_divisible=32):
-        # type: (List[Tensor], int)
+        # type: (List[Tensor], int) -> Tensor
         if torchvision._is_tracing():
             # batch_images() does not export well to ONNX
             # call _onnx_batch_images() instead
@@ -199,8 +202,12 @@ def batch_images(self, images, size_divisible=32):
 
         return batched_imgs
 
-    def postprocess(self, result, image_shapes, original_image_sizes):
-        # type: (List[Dict[str, Tensor]], List[Tuple[int, int]], List[Tuple[int, int]])
+    def postprocess(self,
+                    result,               # type: List[Dict[str, Tensor]]
+                    image_shapes,         # type: List[Tuple[int, int]]
+                    original_image_sizes  # type: List[Tuple[int, int]]
+                    ):
+        # type: (...) -> List[Dict[str, Tensor]]
         if self.training:
             return result
         for i, (pred, im_s, o_im_s) in enumerate(zip(result, image_shapes, original_image_sizes)):
@@ -228,7 +235,7 @@ def __repr__(self):
 
 
 def resize_keypoints(keypoints, original_size, new_size):
-    # type: (Tensor, List[int], List[int])
+    # type: (Tensor, List[int], List[int]) -> Tensor
     ratios = [
         torch.tensor(s, dtype=torch.float32, device=keypoints.device) /
         torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
@@ -247,7 +254,7 @@ def resize_keypoints(keypoints, original_size, new_size):
 
 
 def resize_boxes(boxes, original_size, new_size):
-    # type: (Tensor, List[int], List[int])
+    # type: (Tensor, List[int], List[int]) -> Tensor
     ratios = [
         torch.tensor(s, dtype=torch.float32, device=boxes.device) /
         torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py
index 2c2fd2d36c2..b5258ffa58f 100644
--- a/torchvision/ops/boxes.py
+++ b/torchvision/ops/boxes.py
@@ -5,7 +5,7 @@
 
 
 def nms(boxes, scores, iou_threshold):
-    # type: (Tensor, Tensor, float)
+    # type: (Tensor, Tensor, float) -> Tensor
     """
     Performs non-maximum suppression (NMS) on the boxes according
     to their intersection-over-union (IoU).
@@ -41,7 +41,7 @@ def nms(boxes, scores, iou_threshold):
 
 
 def batched_nms(boxes, scores, idxs, iou_threshold):
-    # type: (Tensor, Tensor, Tensor, float)
+    # type: (Tensor, Tensor, Tensor, float) -> Tensor
     """
     Performs non-maximum suppression in a batched fashion.
 
@@ -82,7 +82,7 @@ def batched_nms(boxes, scores, idxs, iou_threshold):
 
 
 def remove_small_boxes(boxes, min_size):
-    # type: (Tensor, float)
+    # type: (Tensor, float) -> Tensor
     """
     Remove boxes which contains at least one side smaller than min_size.
 
@@ -101,7 +101,7 @@ def remove_small_boxes(boxes, min_size):
 
 
 def clip_boxes_to_image(boxes, size):
-    # type: (Tensor, Tuple[int, int])
+    # type: (Tensor, Tuple[int, int]) -> Tensor
     """
     Clip boxes so that they lie inside an image of size `size`.
 
diff --git a/torchvision/ops/feature_pyramid_network.py b/torchvision/ops/feature_pyramid_network.py
index 09e79cc7efd..a2d8c409490 100644
--- a/torchvision/ops/feature_pyramid_network.py
+++ b/torchvision/ops/feature_pyramid_network.py
@@ -67,7 +67,7 @@ def __init__(self, in_channels_list, out_channels, extra_blocks=None):
         self.extra_blocks = extra_blocks
 
     def get_result_from_inner_blocks(self, x, idx):
-        # type: (Tensor, int)
+        # type: (Tensor, int) -> Tensor
         """
         This is equivalent to self.inner_blocks[idx](x),
         but torchscript doesn't support this yet
@@ -86,7 +86,7 @@ def get_result_from_inner_blocks(self, x, idx):
         return out
 
     def get_result_from_layer_blocks(self, x, idx):
-        # type: (Tensor, int)
+        # type: (Tensor, int) -> Tensor
         """
         This is equivalent to self.layer_blocks[idx](x),
         but torchscript doesn't support this yet
@@ -105,7 +105,7 @@ def get_result_from_layer_blocks(self, x, idx):
         return out
 
     def forward(self, x):
-        # type: (Dict[str, Tensor])
+        # type: (Dict[str, Tensor]) -> Dict[str, Tensor]
         """
         Computes the FPN for a set of feature maps.
 
@@ -164,7 +164,7 @@ class LastLevelMaxPool(ExtraFPNBlock):
     Applies a max_pool2d on top of the last feature map
     """
     def forward(self, x, y, names):
-        # type: (List[Tensor], List[Tensor], List[str])
+        # type: (List[Tensor], List[Tensor], List[str]) -> Tuple[List[Tensor], List[str]]
         names.append("pool")
         x.append(F.max_pool2d(x[-1], 1, 2, 0))
         return x, names
diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py
index b94a9eb4054..60e5f428c88 100644
--- a/torchvision/ops/poolers.py
+++ b/torchvision/ops/poolers.py
@@ -33,7 +33,7 @@ def _onnx_merge_levels(levels, unmerged_results):
 
 # TODO: (eellison) T54974082 https://github.com/pytorch/pytorch/issues/26744/pytorch/issues/26744
 def initLevelMapper(k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
-    # type: (int, int, int, int, float)
+    # type: (int, int, int, int, float) -> LevelMapper
     return LevelMapper(k_min, k_max, canonical_scale, canonical_level, eps)
 
 
@@ -51,7 +51,7 @@ class LevelMapper(object):
     """
 
     def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
-        # type: (int, int, int, int, float)
+        # type: (int, int, int, int, float) -> None
         self.k_min = k_min
         self.k_max = k_max
         self.s0 = canonical_scale
@@ -59,7 +59,7 @@ def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-
         self.eps = eps
 
     def __call__(self, boxlists):
-        # type: (List[Tensor])
+        # type: (List[Tensor]) -> Tensor
         """
         Arguments:
             boxlists (list[BoxList])
@@ -118,7 +118,7 @@ def __init__(self, featmap_names, output_size, sampling_ratio):
         self.map_levels = None
 
     def convert_to_roi_format(self, boxes):
-        # type: (List[Tensor])
+        # type: (List[Tensor]) -> Tensor
         concat_boxes = torch.cat(boxes, dim=0)
         device, dtype = concat_boxes.device, concat_boxes.dtype
         ids = torch.cat(
@@ -132,7 +132,7 @@ def convert_to_roi_format(self, boxes):
         return rois
 
     def infer_scale(self, feature, original_size):
-        # type: (Tensor, List[int])
+        # type: (Tensor, List[int]) -> float
         # assumption: the scale is of the form 2 ** (-k), with k integer
         size = feature.shape[-2:]
         possible_scales = torch.jit.annotate(List[float], [])
@@ -144,7 +144,7 @@ def infer_scale(self, feature, original_size):
         return possible_scales[0]
 
     def setup_scales(self, features, image_shapes):
-        # type: (List[Tensor], List[Tuple[int, int]])
+        # type: (List[Tensor], List[Tuple[int, int]]) -> None
         assert len(image_shapes) != 0
         max_x = 0
         max_y = 0
@@ -162,7 +162,7 @@ def setup_scales(self, features, image_shapes):
         self.map_levels = initLevelMapper(int(lvl_min), int(lvl_max))
 
     def forward(self, x, boxes, image_shapes):
-        # type: (Dict[str, Tensor], List[Tensor], List[Tuple[int, int]])
+        # type: (Dict[str, Tensor], List[Tensor], List[Tuple[int, int]]) -> Tensor
         """
         Arguments:
             x (OrderedDict[Tensor]): feature maps for each level. They are assumed to have