
Commit 3d60f49

[*.py] Rename "Arguments:" to "Args:" (#3203)
Co-authored-by: Vasilis Vryniotis <[email protected]>
1 parent ca6fdd6 commit 3d60f49

28 files changed, +73 −73 lines changed

references/detection/group_by_aspect_ratio.py

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ class GroupedBatchSampler(BatchSampler):
     It enforces that the batch only contain elements from the same group.
     It also tries to provide mini-batches which follows an ordering which is
     as close as possible to the ordering from the original sampler.
-    Arguments:
+    Args:
         sampler (Sampler): Base sampler.
         group_ids (list[int]): If the sampler produces indices in range [0, N),
             `group_ids` must be a list of `N` ints which contains the group id of each sample.
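For context, a minimal usage sketch of the sampler whose docstring is touched here (illustrative only; it assumes the script runs inside references/detection so the module imports directly, and that a detection-style `dataset` has been built earlier):

    from torch.utils.data import DataLoader, RandomSampler
    from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups

    sampler = RandomSampler(dataset)                        # `dataset` is assumed to exist
    group_ids = create_aspect_ratio_groups(dataset, k=3)    # bucket indices by aspect ratio
    batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2)
    loader = DataLoader(dataset, batch_sampler=batch_sampler)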

torchvision/datasets/samplers/clip_sampler.py

Lines changed: 2 additions & 2 deletions
@@ -111,7 +111,7 @@ class UniformClipSampler(Sampler):
     When number of unique clips in the video is fewer than num_video_clips_per_video,
     repeat the clips until `num_video_clips_per_video` clips are collected

-    Arguments:
+    Args:
         video_clips (VideoClips): video clips to sample from
         num_clips_per_video (int): number of clips to be sampled per video
     """
@@ -151,7 +151,7 @@ class RandomClipSampler(Sampler):
     """
     Samples at most `max_video_clips_per_video` clips for each video randomly

-    Arguments:
+    Args:
         video_clips (VideoClips): video clips to sample from
         max_clips_per_video (int): maximum number of clips to be sampled per video
     """

torchvision/datasets/video_utils.py

Lines changed: 3 additions & 3 deletions
@@ -88,7 +88,7 @@ class VideoClips(object):
     Recreating the clips for different clip lengths is fast, and can be done
     with the `compute_clips` method.

-    Arguments:
+    Args:
         video_paths (List[str]): paths to the video files
         clip_length_in_frames (int): size of a clip in number of frames
         frames_between_clips (int): step (in frames) between each clip
@@ -227,7 +227,7 @@ def compute_clips(self, num_frames, step, frame_rate=None):
     Always returns clips of size `num_frames`, meaning that the
     last few frames in a video can potentially be dropped.

-    Arguments:
+    Args:
         num_frames (int): number of frames for the clip
         step (int): distance between two clips
     """
@@ -285,7 +285,7 @@ def get_clip(self, idx):
     """
     Gets a subclip from a list of videos.

-    Arguments:
+    Args:
         idx (int): index of the subclip. Must be between 0 and num_clips().

     Returns:
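A minimal sketch of how these methods fit together (illustrative; the video paths are hypothetical):

    from torchvision.datasets.video_utils import VideoClips

    video_paths = ["train/clip_0.mp4", "train/clip_1.mp4"]    # hypothetical files
    clips = VideoClips(video_paths, clip_length_in_frames=16, frames_between_clips=16)
    clips.compute_clips(num_frames=8, step=8)                 # re-slice without re-reading metadata
    video, audio, info, video_idx = clips.get_clip(0)         # video: Tensor[T, H, W, C]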

torchvision/io/image.py

Lines changed: 4 additions & 4 deletions
@@ -71,7 +71,7 @@ def read_file(path: str) -> torch.Tensor:
     Reads and outputs the bytes contents of a file as a uint8 Tensor
     with one dimension.

-    Arguments:
+    Args:
         path (str): the path to the file to be read

     Returns:
@@ -86,7 +86,7 @@ def write_file(filename: str, data: torch.Tensor) -> None:
     Writes the contents of a uint8 tensor with one dimension to a
     file.

-    Arguments:
+    Args:
         filename (str): the path to the file to be written
         data (Tensor): the contents to be written to the output file
     """
@@ -99,7 +99,7 @@ def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGE
     Optionally converts the image to the desired format.
     The values of the output tensor are uint8 between 0 and 255.

-    Arguments:
+    Args:
         input (Tensor[1]): a one dimensional uint8 tensor containing
             the raw bytes of the PNG image.
         mode (ImageReadMode): the read mode used for optionally
@@ -162,7 +162,7 @@ def decode_jpeg(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANG
     Optionally converts the image to the desired format.
     The values of the output tensor are uint8 between 0 and 255.

-    Arguments:
+    Args:
         input (Tensor[1]): a one dimensional uint8 tensor containing
             the raw bytes of the JPEG image.
         mode (ImageReadMode): the read mode used for optionally
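These functions compose naturally; a small sketch (illustrative; the file name is a placeholder):

    from torchvision.io.image import read_file, decode_jpeg, write_file, ImageReadMode

    data = read_file("photo.jpg")                     # 1-D uint8 tensor with the raw bytes
    img = decode_jpeg(data, mode=ImageReadMode.RGB)   # uint8 tensor of shape [3, H, W]
    write_file("photo_copy.jpg", data)                # write the untouched bytes back out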

torchvision/models/_utils.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ class IntermediateLayerGetter(nn.ModuleDict):
     assigned to the model. So if `model` is passed, `model.feature1` can
     be returned, but not `model.feature1.layer2`.

-    Arguments:
+    Args:
         model (nn.Module): model on which we will extract the features
         return_layers (Dict[name, new_name]): a dict containing the names
             of the modules for which the activations will be returned as
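For reference, a short sketch of the class being documented (it lives in an internal module, so the import path may change between releases):

    import torch
    from torchvision.models import resnet18
    from torchvision.models._utils import IntermediateLayerGetter

    model = resnet18(pretrained=False)
    getter = IntermediateLayerGetter(model, return_layers={"layer1": "feat1", "layer3": "feat3"})
    out = getter(torch.rand(1, 3, 224, 224))
    # out is an OrderedDict mapping "feat1" and "feat3" to the corresponding feature maps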

torchvision/models/detection/_utils.py

Lines changed: 7 additions & 7 deletions
@@ -15,7 +15,7 @@ class BalancedPositiveNegativeSampler(object):
     def __init__(self, batch_size_per_image, positive_fraction):
         # type: (int, float) -> None
         """
-        Arguments:
+        Args:
             batch_size_per_image (int): number of elements to be selected per image
             positive_fraction (float): percentace of positive elements per batch
         """
@@ -25,7 +25,7 @@ def __init__(self, batch_size_per_image, positive_fraction):
     def __call__(self, matched_idxs):
         # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
         """
-        Arguments:
+        Args:
             matched idxs: list of tensors containing -1, 0 or positive values.
                 Each tensor corresponds to a specific image.
                 -1 values are ignored, 0 are considered as negatives and > 0 as
@@ -83,7 +83,7 @@ def encode_boxes(reference_boxes, proposals, weights):
     Encode a set of proposals with respect to some
     reference boxes

-    Arguments:
+    Args:
         reference_boxes (Tensor): reference boxes
         proposals (Tensor): boxes to be encoded
     """
@@ -133,7 +133,7 @@ class BoxCoder(object):
     def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
         # type: (Tuple[float, float, float, float], float) -> None
         """
-        Arguments:
+        Args:
             weights (4-element tuple)
             bbox_xform_clip (float)
         """
@@ -153,7 +153,7 @@ def encode_single(self, reference_boxes, proposals):
     Encode a set of proposals with respect to some
     reference boxes

-    Arguments:
+    Args:
         reference_boxes (Tensor): reference boxes
         proposals (Tensor): boxes to be encoded
     """
@@ -183,7 +183,7 @@ def decode_single(self, rel_codes, boxes):
     From a set of original boxes and encoded relative box offsets,
     get the decoded boxes.

-    Arguments:
+    Args:
         rel_codes (Tensor): encoded boxes
         boxes (Tensor): reference boxes.
     """
@@ -361,7 +361,7 @@ def overwrite_eps(model, eps):
     only when the pretrained weights are loaded to maintain compatibility
     with previous versions.

-    Arguments:
+    Args:
         model (nn.Module): The model on which we perform the overwrite.
         eps (float): The new value of eps.
     """

torchvision/models/detection/anchor_utils.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ class AnchorGenerator(nn.Module):
     and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors
     per spatial location for feature map i.

-    Arguments:
+    Args:
         sizes (Tuple[Tuple[int]]):
         aspect_ratios (Tuple[Tuple[float]]):
     """

torchvision/models/detection/backbone_utils.py

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ class BackboneWithFPN(nn.Module):
     Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
     extract a submodel that returns the feature maps specified in return_layers.
     The same limitations of IntermediatLayerGetter apply here.
-    Arguments:
+    Args:
         backbone (nn.Module)
         return_layers (Dict[name, new_name]): a dict containing the names
             of the modules for which the activations will be returned as
@@ -73,7 +73,7 @@ def resnet_fpn_backbone(
     >>>  ('3', torch.Size([1, 256, 2, 2])),
     >>>  ('pool', torch.Size([1, 256, 1, 1]))]

-    Arguments:
+    Args:
         backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',
             'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
         norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:
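A compact usage sketch matching the docstring example above (illustrative, not part of the commit):

    import torch
    from torchvision.models.detection.backbone_utils import resnet_fpn_backbone

    backbone = resnet_fpn_backbone('resnet50', pretrained=False, trainable_layers=3)
    features = backbone(torch.rand(1, 3, 64, 64))
    # features is an OrderedDict with keys '0', '1', '2', '3' and 'pool', each with 256 channels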

torchvision/models/detection/faster_rcnn.py

Lines changed: 4 additions & 4 deletions
@@ -49,7 +49,7 @@ class FasterRCNN(GeneralizedRCNN):
         - labels (Int64Tensor[N]): the predicted labels for each image
         - scores (Tensor[N]): the scores or each prediction

-    Arguments:
+    Args:
         backbone (nn.Module): the network used to compute the features for the model.
             It should contain a out_channels attribute, which indicates the number of output
             channels that each feature map has (and it should be the same for all feature maps).
@@ -239,7 +239,7 @@ class TwoMLPHead(nn.Module):
     """
     Standard heads for FPN-based models

-    Arguments:
+    Args:
         in_channels (int): number of input channels
         representation_size (int): size of the intermediate representation
     """
@@ -264,7 +264,7 @@ class FastRCNNPredictor(nn.Module):
     Standard classification + bounding box regression layers
     for Fast R-CNN.

-    Arguments:
+    Args:
         in_channels (int): number of input channels
         num_classes (int): number of output classes (including background)
     """
@@ -341,7 +341,7 @@ def fasterrcnn_resnet50_fpn(pretrained=False, progress=True,
     >>> # optionally, if you want to export the model to ONNX:
     >>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)

-    Arguments:
+    Args:
         pretrained (bool): If True, returns a model pre-trained on COCO train2017
         progress (bool): If True, displays a progress bar of the download to stderr
         pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
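FastRCNNPredictor is the piece usually swapped out when fine-tuning on a custom dataset; a short sketch (illustrative; num_classes is hypothetical and includes the background class):

    import torchvision
    from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    num_classes = 3  # hypothetical: 2 object categories + background
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)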

torchvision/models/detection/generalized_rcnn.py

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ class GeneralizedRCNN(nn.Module):
     """
     Main class for Generalized R-CNN.

-    Arguments:
+    Args:
         backbone (nn.Module):
         rpn (nn.Module):
         roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
@@ -43,7 +43,7 @@ def eager_outputs(self, losses, detections):
     def forward(self, images, targets=None):
         # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
         """
-        Arguments:
+        Args:
             images (list[Tensor]): images to be processed
             targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)

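The forward signature above is shared by the detection models built on GeneralizedRCNN; a minimal training-mode sketch of the expected inputs (illustrative; the boxes and labels are made up):

    import torch
    import torchvision

    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        pretrained=False, pretrained_backbone=False, num_classes=2)
    images = [torch.rand(3, 300, 400)]
    targets = [{
        "boxes": torch.tensor([[50.0, 60.0, 200.0, 220.0]]),  # FloatTensor[N, 4], (x1, y1, x2, y2)
        "labels": torch.tensor([1]),                           # Int64Tensor[N]
    }]
    model.train()
    loss_dict = model(images, targets)  # dict of training losses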

torchvision/models/detection/image_list.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ class ImageList(object):

     def __init__(self, tensors: Tensor, image_sizes: List[Tuple[int, int]]):
         """
-        Arguments:
+        Args:
             tensors (tensor)
             image_sizes (list[tuple[int, int]])
         """

torchvision/models/detection/keypoint_rcnn.py

Lines changed: 2 additions & 2 deletions
@@ -44,7 +44,7 @@ class KeypointRCNN(FasterRCNN):
         - scores (Tensor[N]): the scores or each prediction
         - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.

-    Arguments:
+    Args:
         backbone (nn.Module): the network used to compute the features for the model.
             It should contain a out_channels attribute, which indicates the number of output
             channels that each feature map has (and it should be the same for all feature maps).
@@ -309,7 +309,7 @@ def keypointrcnn_resnet50_fpn(pretrained=False, progress=True,
     >>> # optionally, if you want to export the model to ONNX:
     >>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11)

-    Arguments:
+    Args:
         pretrained (bool): If True, returns a model pre-trained on COCO train2017
         progress (bool): If True, displays a progress bar of the download to stderr
         pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
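An inference sketch showing the [x, y, v] keypoint output mentioned above (illustrative, not part of the commit):

    import torch
    import torchvision

    model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
        pretrained=False, pretrained_backbone=False)
    model.eval()
    predictions = model([torch.rand(3, 300, 400)])
    # predictions[0]["keypoints"] has shape [N, 17, 3] with the default 17 COCO keypoints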

torchvision/models/detection/mask_rcnn.py

Lines changed: 3 additions & 3 deletions
@@ -48,7 +48,7 @@ class MaskRCNN(FasterRCNN):
         obtain the final segmentation masks, the soft masks can be thresholded, generally
         with a value of 0.5 (mask >= 0.5)

-    Arguments:
+    Args:
         backbone (nn.Module): the network used to compute the features for the model.
             It should contain a out_channels attribute, which indicates the number of output
             channels that each feature map has (and it should be the same for all feature maps).
@@ -222,7 +222,7 @@ def __init__(self, backbone, num_classes=None,
 class MaskRCNNHeads(nn.Sequential):
     def __init__(self, in_channels, layers, dilation):
         """
-        Arguments:
+        Args:
             in_channels (int): number of input channels
             layers (list): feature dimensions of each FCN layer
             dilation (int): dilation rate of kernel
@@ -308,7 +308,7 @@ def maskrcnn_resnet50_fpn(pretrained=False, progress=True,
     >>> # optionally, if you want to export the model to ONNX:
     >>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version = 11)

-    Arguments:
+    Args:
         pretrained (bool): If True, returns a model pre-trained on COCO train2017
         progress (bool): If True, displays a progress bar of the download to stderr
         pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
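As with FastRCNNPredictor above, the mask head can be swapped for fine-tuning; a hedged sketch (illustrative; num_classes and the hidden size are made-up values):

    import torchvision
    from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    num_classes = 2  # hypothetical: 1 object category + background
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, 256, num_classes)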

torchvision/models/detection/retinanet.py

Lines changed: 6 additions & 6 deletions
@@ -34,7 +34,7 @@ class RetinaNetHead(nn.Module):
     """
     A regression and classification head for use in RetinaNet.

-    Arguments:
+    Args:
         in_channels (int): number of channels of the input feature
         num_anchors (int): number of anchors to be predicted
         num_classes (int): number of classes to be predicted
@@ -64,7 +64,7 @@ class RetinaNetClassificationHead(nn.Module):
     """
     A classification head for use in RetinaNet.

-    Arguments:
+    Args:
         in_channels (int): number of channels of the input feature
         num_anchors (int): number of anchors to be predicted
         num_classes (int): number of classes to be predicted
@@ -149,7 +149,7 @@ class RetinaNetRegressionHead(nn.Module):
     """
     A regression head for use in RetinaNet.

-    Arguments:
+    Args:
         in_channels (int): number of channels of the input feature
         num_anchors (int): number of anchors to be predicted
     """
@@ -251,7 +251,7 @@ class RetinaNet(nn.Module):
         - labels (Int64Tensor[N]): the predicted labels for each image
         - scores (Tensor[N]): the scores for each prediction

-    Arguments:
+    Args:
         backbone (nn.Module): the network used to compute the features for the model.
             It should contain an out_channels attribute, which indicates the number of output
             channels that each feature map has (and it should be the same for all feature maps).
@@ -457,7 +457,7 @@ def postprocess_detections(self, head_outputs, anchors, image_shapes):
     def forward(self, images, targets=None):
         # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
         """
-        Arguments:
+        Args:
             images (list[Tensor]): images to be processed
             targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)

@@ -597,7 +597,7 @@ def retinanet_resnet50_fpn(pretrained=False, progress=True,
     >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
     >>> predictions = model(x)

-    Arguments:
+    Args:
         pretrained (bool): If True, returns a model pre-trained on COCO train2017
         progress (bool): If True, displays a progress bar of the download to stderr
     """

torchvision/models/detection/roi_heads.py

Lines changed: 4 additions & 4 deletions
@@ -18,7 +18,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
     """
     Computes the loss for Faster R-CNN.

-    Arguments:
+    Args:
         class_logits (Tensor)
         box_regression (Tensor)
         labels (list[BoxList])
@@ -61,7 +61,7 @@ def maskrcnn_inference(x, labels):
     probability (which are of fixed size and directly output
     by the CNN) and return the masks in the mask field of the BoxList.

-    Arguments:
+    Args:
         x (Tensor): the mask logits
         labels (list[BoxList]): bounding boxes that are used as
             reference, one for ech image
@@ -101,7 +101,7 @@ def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M):
 def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs):
     # type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor
     """
-    Arguments:
+    Args:
         proposals (list[BoxList])
         mask_logits (Tensor)
         targets (list[BoxList])
@@ -727,7 +727,7 @@ def forward(self,
     ):
         # type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
         """
-        Arguments:
+        Args:
             features (List[Tensor])
             proposals (List[Tensor[N, 4]])
             image_shapes (List[Tuple[H, W]])
