
Commit 833ff52

datumbox authored and facebook-github-bot committed
[fbsync] Add types and improve descriptions to ArgumentParser parameters (#4724)
Summary:
* Add types to default arguments to resolve issue #4694
* Add missing types on argument parser (#4694)
* Format with ufmt
* Update after review
* Update argument types in train.py of video_classification and similarity, and in train_quantization.py

Reviewed By: NicolasHug
Differential Revision: D31916335
fbshipit-source-id: a717a3cac868b567db57b84a545ad9363820179b
Co-authored-by: Vasilis Vryniotis <[email protected]>
1 parent 815ca8f commit 833ff52

File tree

6 files changed (+63, -51 lines)
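The commit is a mechanical sweep over the reference scripts: every string-valued parser.add_argument call gains an explicit type=str and a more descriptive help string. A minimal sketch of the pattern, as an illustration only (the standalone script below is not part of the repo):

# Illustrative sketch of the pattern applied in this commit, not a file from torchvision.
import argparse

def get_args_parser(add_help=True):
    parser = argparse.ArgumentParser(description="Example Training", add_help=add_help)
    # Before: no explicit type and a terse description.
    #   parser.add_argument("--model", default="resnet18", help="model")
    # After: explicit type=str and a clearer help string, so --help output
    # documents what kind of value is expected.
    parser.add_argument("--model", default="resnet18", type=str, help="model name")
    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
    parser.add_argument(
        "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
    )
    return parser

if __name__ == "__main__":
    args = get_args_parser().parse_args(["--model", "resnet50", "-b", "64"])
    print(args.model, args.batch_size)  # resnet50 64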

references/classification/train.py

Lines changed: 11 additions & 9 deletions
@@ -346,10 +346,12 @@ def get_args_parser(add_help=True):
 
     parser = argparse.ArgumentParser(description="PyTorch Classification Training", add_help=add_help)
 
-    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", help="dataset")
-    parser.add_argument("--model", default="resnet18", help="model")
-    parser.add_argument("--device", default="cuda", help="device")
-    parser.add_argument("-b", "--batch-size", default=32, type=int)
+    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", type=str, help="dataset path")
+    parser.add_argument("--model", default="resnet18", type=str, help="model name")
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
+    parser.add_argument(
+        "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
+    )
     parser.add_argument("--epochs", default=90, type=int, metavar="N", help="number of total epochs to run")
     parser.add_argument(
         "-j", "--workers", default=16, type=int, metavar="N", help="number of data loading workers (default: 16)"
@@ -377,7 +379,7 @@ def get_args_parser(add_help=True):
     )
     parser.add_argument("--mixup-alpha", default=0.0, type=float, help="mixup alpha (default: 0.0)")
     parser.add_argument("--cutmix-alpha", default=0.0, type=float, help="cutmix alpha (default: 0.0)")
-    parser.add_argument("--lr-scheduler", default="steplr", help="the lr scheduler (default: steplr)")
+    parser.add_argument("--lr-scheduler", default="steplr", type=str, help="the lr scheduler (default: steplr)")
     parser.add_argument("--lr-warmup-epochs", default=0, type=int, help="the number of epochs to warmup (default: 0)")
     parser.add_argument(
         "--lr-warmup-method", default="constant", type=str, help="the warmup method (default: constant)"
@@ -386,8 +388,8 @@ def get_args_parser(add_help=True):
     parser.add_argument("--lr-step-size", default=30, type=int, help="decrease lr every step-size epochs")
     parser.add_argument("--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma")
     parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
     parser.add_argument(
         "--cache-dataset",
@@ -413,15 +415,15 @@ def get_args_parser(add_help=True):
         help="Use pre-trained models from the modelzoo",
         action="store_true",
     )
-    parser.add_argument("--auto-augment", default=None, help="auto augment policy (default: None)")
+    parser.add_argument("--auto-augment", default=None, type=str, help="auto augment policy (default: None)")
     parser.add_argument("--random-erase", default=0.0, type=float, help="random erasing probability (default: 0.0)")
 
     # Mixed precision training parameters
     parser.add_argument("--amp", action="store_true", help="Use torch.cuda.amp for mixed precision training")
 
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
     parser.add_argument(
         "--model-ema", action="store_true", help="enable tracking Exponential Moving Average of model parameters"
     )
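The updated --batch-size help text makes explicit that the value is per GPU: under distributed launch the effective batch size is the number of processes times the per-GPU value. A rough illustration of that arithmetic with hypothetical numbers (not taken from the diff):

# Hypothetical values illustrating the "$NGPU x batch_size" note in the new help text.
ngpu = 8          # number of distributed processes, e.g. --world-size
batch_size = 32   # per-GPU value passed via -b / --batch-size
total_batch_size = ngpu * batch_size
print(total_batch_size)  # 256 images consumed per training step across all workers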

references/classification/train_quantization.py

Lines changed: 10 additions & 8 deletions
@@ -155,12 +155,14 @@ def get_args_parser(add_help=True):
 
     parser = argparse.ArgumentParser(description="PyTorch Quantized Classification Training", add_help=add_help)
 
-    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", help="dataset")
-    parser.add_argument("--model", default="mobilenet_v2", help="model")
-    parser.add_argument("--backend", default="qnnpack", help="fbgemm or qnnpack")
-    parser.add_argument("--device", default="cuda", help="device")
+    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", type=str, help="dataset path")
+    parser.add_argument("--model", default="mobilenet_v2", type=str, help="model name")
+    parser.add_argument("--backend", default="qnnpack", type=str, help="fbgemm or qnnpack")
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
 
-    parser.add_argument("-b", "--batch-size", default=32, type=int, help="batch size for calibration/training")
+    parser.add_argument(
+        "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
+    )
     parser.add_argument("--eval-batch-size", default=128, type=int, help="batch size for evaluation")
     parser.add_argument("--epochs", default=90, type=int, metavar="N", help="number of total epochs to run")
     parser.add_argument(
@@ -203,8 +205,8 @@ def get_args_parser(add_help=True):
     parser.add_argument("--lr-step-size", default=30, type=int, help="decrease lr every step-size epochs")
     parser.add_argument("--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma")
     parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
     parser.add_argument(
         "--cache-dataset",
@@ -234,7 +236,7 @@ def get_args_parser(add_help=True):
 
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
 
     parser.add_argument(
         "--interpolation", default="bilinear", type=str, help="the interpolation method (default: bilinear)"

references/detection/train.py

Lines changed: 13 additions & 9 deletions
@@ -50,10 +50,10 @@ def get_args_parser(add_help=True):
 
     parser = argparse.ArgumentParser(description="PyTorch Detection Training", add_help=add_help)
 
-    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", help="dataset")
-    parser.add_argument("--dataset", default="coco", help="dataset")
-    parser.add_argument("--model", default="maskrcnn_resnet50_fpn", help="model")
-    parser.add_argument("--device", default="cuda", help="device")
+    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
+    parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
+    parser.add_argument("--model", default="maskrcnn_resnet50_fpn", type=str, help="model name")
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
     parser.add_argument(
         "-b", "--batch-size", default=2, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
     )
@@ -77,7 +77,9 @@ def get_args_parser(add_help=True):
         help="weight decay (default: 1e-4)",
         dest="weight_decay",
     )
-    parser.add_argument("--lr-scheduler", default="multisteplr", help="the lr scheduler (default: multisteplr)")
+    parser.add_argument(
+        "--lr-scheduler", default="multisteplr", type=str, help="name of lr scheduler (default: multisteplr)"
+    )
     parser.add_argument(
         "--lr-step-size", default=8, type=int, help="decrease lr every step-size epochs (multisteplr scheduler only)"
     )
@@ -92,15 +94,17 @@ def get_args_parser(add_help=True):
         "--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma (multisteplr scheduler only)"
     )
     parser.add_argument("--print-freq", default=20, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start_epoch", default=0, type=int, help="start epoch")
     parser.add_argument("--aspect-ratio-group-factor", default=3, type=int)
     parser.add_argument("--rpn-score-thresh", default=None, type=float, help="rpn score threshold for faster-rcnn")
     parser.add_argument(
         "--trainable-backbone-layers", default=None, type=int, help="number of trainable layers of backbone"
     )
-    parser.add_argument("--data-augmentation", default="hflip", help="data augmentation policy (default: hflip)")
+    parser.add_argument(
+        "--data-augmentation", default="hflip", type=str, help="data augmentation policy (default: hflip)"
+    )
     parser.add_argument(
         "--sync-bn",
         dest="sync_bn",
@@ -122,7 +126,7 @@ def get_args_parser(add_help=True):
 
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
 
     return parser

references/segmentation/train.py

Lines changed: 10 additions & 8 deletions
@@ -201,12 +201,14 @@ def get_args_parser(add_help=True):
 
     parser = argparse.ArgumentParser(description="PyTorch Segmentation Training", add_help=add_help)
 
-    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", help="dataset path")
-    parser.add_argument("--dataset", default="coco", help="dataset name")
-    parser.add_argument("--model", default="fcn_resnet101", help="model")
+    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
+    parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
+    parser.add_argument("--model", default="fcn_resnet101", type=str, help="model name")
     parser.add_argument("--aux-loss", action="store_true", help="auxiliar loss")
-    parser.add_argument("--device", default="cuda", help="device")
-    parser.add_argument("-b", "--batch-size", default=8, type=int)
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
+    parser.add_argument(
+        "-b", "--batch-size", default=8, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
+    )
     parser.add_argument("--epochs", default=30, type=int, metavar="N", help="number of total epochs to run")
 
     parser.add_argument(
@@ -227,8 +229,8 @@ def get_args_parser(add_help=True):
     parser.add_argument("--lr-warmup-method", default="linear", type=str, help="the warmup method (default: linear)")
     parser.add_argument("--lr-warmup-decay", default=0.01, type=float, help="the decay for lr")
     parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
     parser.add_argument(
         "--test-only",
@@ -244,7 +246,7 @@ def get_args_parser(add_help=True):
     )
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
 
     return parser

references/similarity/train.py

Lines changed: 8 additions & 8 deletions
@@ -137,19 +137,19 @@ def parse_args():
 
     parser = argparse.ArgumentParser(description="PyTorch Embedding Learning")
 
-    parser.add_argument("--dataset-dir", default="/tmp/fmnist/", help="FashionMNIST dataset directory path")
+    parser.add_argument("--dataset-dir", default="/tmp/fmnist/", type=str, help="FashionMNIST dataset directory path")
     parser.add_argument(
         "-p", "--labels-per-batch", default=8, type=int, help="Number of unique labels/classes per batch"
     )
     parser.add_argument("-k", "--samples-per-label", default=8, type=int, help="Number of samples per label in a batch")
-    parser.add_argument("--eval-batch-size", default=512, type=int)
-    parser.add_argument("--epochs", default=10, type=int, metavar="N", help="Number of training epochs to run")
-    parser.add_argument("-j", "--workers", default=4, type=int, metavar="N", help="Number of data loading workers")
-    parser.add_argument("--lr", default=0.0001, type=float, help="Learning rate")
+    parser.add_argument("--eval-batch-size", default=512, type=int, help="batch size for evaluation")
+    parser.add_argument("--epochs", default=10, type=int, metavar="N", help="number of total epochs to run")
+    parser.add_argument("-j", "--workers", default=4, type=int, metavar="N", help="number of data loading workers")
+    parser.add_argument("--lr", default=0.0001, type=float, help="initial learning rate")
     parser.add_argument("--margin", default=0.2, type=float, help="Triplet loss margin")
-    parser.add_argument("--print-freq", default=20, type=int, help="Print frequency")
-    parser.add_argument("--save-dir", default=".", help="Model save directory")
-    parser.add_argument("--resume", default="", help="Resume from checkpoint")
+    parser.add_argument("--print-freq", default=20, type=int, help="print frequency")
+    parser.add_argument("--save-dir", default=".", type=str, help="Model save directory")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
 
     return parser.parse_args()

references/video_classification/train.py

Lines changed: 11 additions & 9 deletions
@@ -288,16 +288,18 @@ def parse_args():
 
     parser = argparse.ArgumentParser(description="PyTorch Video Classification Training")
 
-    parser.add_argument("--data-path", default="/datasets01_101/kinetics/070618/", help="dataset")
-    parser.add_argument("--train-dir", default="train_avi-480p", help="name of train dir")
-    parser.add_argument("--val-dir", default="val_avi-480p", help="name of val dir")
-    parser.add_argument("--model", default="r2plus1d_18", help="model")
-    parser.add_argument("--device", default="cuda", help="device")
+    parser.add_argument("--data-path", default="/datasets01_101/kinetics/070618/", type=str, help="dataset path")
+    parser.add_argument("--train-dir", default="train_avi-480p", type=str, help="name of train dir")
+    parser.add_argument("--val-dir", default="val_avi-480p", type=str, help="name of val dir")
+    parser.add_argument("--model", default="r2plus1d_18", type=str, help="model name")
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
     parser.add_argument("--clip-len", default=16, type=int, metavar="N", help="number of frames per clip")
     parser.add_argument(
         "--clips-per-video", default=5, type=int, metavar="N", help="maximum number of clips per video to consider"
     )
-    parser.add_argument("-b", "--batch-size", default=24, type=int)
+    parser.add_argument(
+        "-b", "--batch-size", default=24, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
+    )
     parser.add_argument("--epochs", default=45, type=int, metavar="N", help="number of total epochs to run")
     parser.add_argument(
         "-j", "--workers", default=10, type=int, metavar="N", help="number of data loading workers (default: 10)"
@@ -319,8 +321,8 @@ def parse_args():
     parser.add_argument("--lr-warmup-method", default="linear", type=str, help="the warmup method (default: linear)")
     parser.add_argument("--lr-warmup-decay", default=0.001, type=float, help="the decay for lr")
    parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
     parser.add_argument(
         "--cache-dataset",
@@ -360,7 +362,7 @@ def parse_args():
 
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
 
     args = parser.parse_args()