@@ -51,7 +51,75 @@ def get_transform(train, data_augmentation):
     return presets.DetectionPresetTrain(data_augmentation) if train else presets.DetectionPresetEval()


+def get_args_parser(add_help=True):
+    import argparse
+    parser = argparse.ArgumentParser(description='PyTorch Detection Training', add_help=add_help)
+
+    parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset')
+    parser.add_argument('--dataset', default='coco', help='dataset')
+    parser.add_argument('--model', default='maskrcnn_resnet50_fpn', help='model')
+    parser.add_argument('--device', default='cuda', help='device')
+    parser.add_argument('-b', '--batch-size', default=2, type=int,
+                        help='images per gpu, the total batch size is $NGPU x batch_size')
+    parser.add_argument('--epochs', default=26, type=int, metavar='N',
+                        help='number of total epochs to run')
+    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
+                        help='number of data loading workers (default: 4)')
+    parser.add_argument('--lr', default=0.02, type=float,
+                        help='initial learning rate, 0.02 is the default value for training '
+                             'on 8 gpus and 2 images_per_gpu')
+    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
+                        help='momentum')
+    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
+                        metavar='W', help='weight decay (default: 1e-4)',
+                        dest='weight_decay')
+    parser.add_argument('--lr-scheduler', default="multisteplr", help='the lr scheduler (default: multisteplr)')
+    parser.add_argument('--lr-step-size', default=8, type=int,
+                        help='decrease lr every step-size epochs (multisteplr scheduler only)')
+    parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int,
+                        help='decrease lr every step-size epochs (multisteplr scheduler only)')
+    parser.add_argument('--lr-gamma', default=0.1, type=float,
+                        help='decrease lr by a factor of lr-gamma (multisteplr scheduler only)')
+    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
+    parser.add_argument('--output-dir', default='.', help='path where to save')
+    parser.add_argument('--resume', default='', help='resume from checkpoint')
+    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
+    parser.add_argument('--aspect-ratio-group-factor', default=3, type=int)
+    parser.add_argument('--rpn-score-thresh', default=None, type=float, help='rpn score threshold for faster-rcnn')
+    parser.add_argument('--trainable-backbone-layers', default=None, type=int,
+                        help='number of trainable layers of backbone')
+    parser.add_argument('--data-augmentation', default="hflip", help='data augmentation policy (default: hflip)')
+    parser.add_argument(
+        "--sync-bn",
+        dest="sync_bn",
+        help="Use sync batch norm",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--test-only",
+        dest="test_only",
+        help="Only test the model",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--pretrained",
+        dest="pretrained",
+        help="Use pre-trained models from the modelzoo",
+        action="store_true",
+    )
+
+    # distributed training parameters
+    parser.add_argument('--world-size', default=1, type=int,
+                        help='number of distributed processes')
+    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
+
+    return parser
+
+
 def main(args):
+    if args.output_dir:
+        utils.mkdir(args.output_dir)
+
     utils.init_distributed_mode(args)
     print(args)

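Because the argument definitions now live in get_args_parser(add_help=True), other scripts can import and extend them instead of copying the whole block. A minimal sketch of that reuse, not part of this diff: the module name train and the extra --run-name flag are assumptions; only get_args_parser itself comes from the change above.

import argparse

from train import get_args_parser  # assumes the reference script is importable as "train"

# add_help=False lets the detection parser act as a parent without a duplicate -h/--help
parser = argparse.ArgumentParser(description='Custom detection experiment',
                                 parents=[get_args_parser(add_help=False)])
parser.add_argument('--run-name', default='debug', help='hypothetical experiment-specific option')

args = parser.parse_args(['--model', 'fasterrcnn_resnet50_fpn', '--epochs', '1'])
print(args.model, args.epochs, args.run_name)

This parent-parser composition is presumably why add_help is exposed as a parameter: argparse raises a conflict if two parsers in the same chain both register -h/--help.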
@@ -155,71 +223,5 @@ def main(args):


 if __name__ == "__main__":
-    import argparse
-    parser = argparse.ArgumentParser(
-        description=__doc__)
-
-    parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset')
-    parser.add_argument('--dataset', default='coco', help='dataset')
-    parser.add_argument('--model', default='maskrcnn_resnet50_fpn', help='model')
-    parser.add_argument('--device', default='cuda', help='device')
-    parser.add_argument('-b', '--batch-size', default=2, type=int,
-                        help='images per gpu, the total batch size is $NGPU x batch_size')
-    parser.add_argument('--epochs', default=26, type=int, metavar='N',
-                        help='number of total epochs to run')
-    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
-                        help='number of data loading workers (default: 4)')
-    parser.add_argument('--lr', default=0.02, type=float,
-                        help='initial learning rate, 0.02 is the default value for training '
-                             'on 8 gpus and 2 images_per_gpu')
-    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
-                        help='momentum')
-    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
-                        metavar='W', help='weight decay (default: 1e-4)',
-                        dest='weight_decay')
-    parser.add_argument('--lr-scheduler', default="multisteplr", help='the lr scheduler (default: multisteplr)')
-    parser.add_argument('--lr-step-size', default=8, type=int,
-                        help='decrease lr every step-size epochs (multisteplr scheduler only)')
-    parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int,
-                        help='decrease lr every step-size epochs (multisteplr scheduler only)')
-    parser.add_argument('--lr-gamma', default=0.1, type=float,
-                        help='decrease lr by a factor of lr-gamma (multisteplr scheduler only)')
-    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
-    parser.add_argument('--output-dir', default='.', help='path where to save')
-    parser.add_argument('--resume', default='', help='resume from checkpoint')
-    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
-    parser.add_argument('--aspect-ratio-group-factor', default=3, type=int)
-    parser.add_argument('--rpn-score-thresh', default=None, type=float, help='rpn score threshold for faster-rcnn')
-    parser.add_argument('--trainable-backbone-layers', default=None, type=int,
-                        help='number of trainable layers of backbone')
-    parser.add_argument('--data-augmentation', default="hflip", help='data augmentation policy (default: hflip)')
-    parser.add_argument(
-        "--sync-bn",
-        dest="sync_bn",
-        help="Use sync batch norm",
-        action="store_true",
-    )
-    parser.add_argument(
-        "--test-only",
-        dest="test_only",
-        help="Only test the model",
-        action="store_true",
-    )
-    parser.add_argument(
-        "--pretrained",
-        dest="pretrained",
-        help="Use pre-trained models from the modelzoo",
-        action="store_true",
-    )
-
-    # distributed training parameters
-    parser.add_argument('--world-size', default=1, type=int,
-                        help='number of distributed processes')
-    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
-
-    args = parser.parse_args()
-
-    if args.output_dir:
-        utils.mkdir(args.output_dir)
-
+    args = get_args_parser().parse_args()
     main(args)
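With the entry point reduced to get_args_parser().parse_args(), the same defaults are also reachable programmatically, for example from a launcher script or a smoke test. A rough sketch, assuming train.py and its dependencies (utils, presets, torchvision, the COCO data) are importable as the module train:

from train import get_args_parser, main

parser = get_args_parser()
defaults = parser.parse_args([])     # every option has a default, so an empty argv works
print(defaults.lr, defaults.epochs)  # 0.02 26, matching the values defined above

# Override a few values the way a wrapper might; an actual run still needs the
# COCO data under --data-path and a working (possibly distributed) environment.
args = parser.parse_args(['--epochs', '1', '--output-dir', '/tmp/detection-run'])
# main(args)  # uncomment to launch training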