Skip to content

Commit 5fbc112

Browse files
authored
Merge branch 'master' into models/ssdlite
2 parents da81b69 + c2ab0c5 commit 5fbc112

File tree

10 files changed

+119
-96
lines changed

10 files changed

+119
-96
lines changed

.circleci/config.yml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -337,7 +337,7 @@ jobs:
337337
binary_macos_wheel:
338338
<<: *binary_common
339339
macos:
340-
xcode: "9.4.1"
340+
xcode: "12.0"
341341
steps:
342342
- checkout_merge
343343
- designate_upload_channel
@@ -397,7 +397,7 @@ jobs:
397397
binary_macos_conda:
398398
<<: *binary_common
399399
macos:
400-
xcode: "9.4.1"
400+
xcode: "12.0"
401401
steps:
402402
- checkout_merge
403403
- designate_upload_channel
@@ -648,7 +648,7 @@ jobs:
648648
command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh
649649
- run:
650650
name: Run tests
651-
command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
651+
command: docker run -e CIRCLECI -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
652652
- run:
653653
name: Post Process
654654
command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh
@@ -739,7 +739,7 @@ jobs:
739739
unittest_macos_cpu:
740740
<<: *binary_common
741741
macos:
742-
xcode: "9.4.1"
742+
xcode: "12.0"
743743
resource_class: large
744744
steps:
745745
- checkout
@@ -815,7 +815,7 @@ jobs:
815815
cmake_macos_cpu:
816816
<<: *binary_common
817817
macos:
818-
xcode: "9.4.1"
818+
xcode: "12.0"
819819
steps:
820820
- checkout_merge
821821
- designate_upload_channel

.circleci/config.yml.in

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -337,7 +337,7 @@ jobs:
337337
binary_macos_wheel:
338338
<<: *binary_common
339339
macos:
340-
xcode: "9.4.1"
340+
xcode: "12.0"
341341
steps:
342342
- checkout_merge
343343
- designate_upload_channel
@@ -397,7 +397,7 @@ jobs:
397397
binary_macos_conda:
398398
<<: *binary_common
399399
macos:
400-
xcode: "9.4.1"
400+
xcode: "12.0"
401401
steps:
402402
- checkout_merge
403403
- designate_upload_channel
@@ -648,7 +648,7 @@ jobs:
648648
command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh
649649
- run:
650650
name: Run tests
651-
command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
651+
command: docker run -e CIRCLECI -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
652652
- run:
653653
name: Post Process
654654
command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh
@@ -739,7 +739,7 @@ jobs:
739739
unittest_macos_cpu:
740740
<<: *binary_common
741741
macos:
742-
xcode: "9.4.1"
742+
xcode: "12.0"
743743
resource_class: large
744744
steps:
745745
- checkout
@@ -815,7 +815,7 @@ jobs:
815815
cmake_macos_cpu:
816816
<<: *binary_common
817817
macos:
818-
xcode: "9.4.1"
818+
xcode: "12.0"
819819
steps:
820820
- checkout_merge
821821
- designate_upload_channel
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
name: Show CI job with links to built docs
2+
3+
on:
4+
pull_request:
5+
branches: [ master ]
6+
7+
jobs:
8+
circleci_artifacts_redirector_job:
9+
runs-on: ubuntu-latest
10+
name: Run CircleCI artifacts redirector
11+
steps:
12+
- name: GitHub Action step
13+
uses: larsoner/circleci-artifacts-redirector-action@master
14+
with:
15+
repo-token: ${{ secrets.GITHUB_TOKEN }}
16+
artifact-path: 0/docs/index.html
17+
circleci-jobs: build_docs
18+
job-title: See the built docs here!

CONTRIBUTING.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,6 +125,10 @@ make html
125125

126126
Then open `docs/build/html/index.html` in your favorite browser.
127127

128+
The docs are also automatically built when you submit a PR. The job that
129+
builds the docs is named `build_docs`. If that job passes, a link to the
130+
rendered docs will be available in a job called `See the built docs here!`.
131+
128132
### New model
129133

130134
More details on how to add a new model will be provided later. Please, do not send any PR with a new model without discussing

references/classification/train.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -224,9 +224,9 @@ def main(args):
224224
print('Training time {}'.format(total_time_str))
225225

226226

227-
def parse_args():
227+
def get_args_parser(add_help=True):
228228
import argparse
229-
parser = argparse.ArgumentParser(description='PyTorch Classification Training')
229+
parser = argparse.ArgumentParser(description='PyTorch Classification Training', add_help=add_help)
230230

231231
parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', help='dataset')
232232
parser.add_argument('--model', default='resnet18', help='model')
@@ -291,11 +291,9 @@ def parse_args():
291291
help='number of distributed processes')
292292
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
293293

294-
args = parser.parse_args()
295-
296-
return args
294+
return parser
297295

298296

299297
if __name__ == "__main__":
300-
args = parse_args()
298+
args = get_args_parser().parse_args()
301299
main(args)

references/classification/train_quantization.py

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313

1414

1515
def main(args):
16-
1716
if args.output_dir:
1817
utils.mkdir(args.output_dir)
1918

@@ -162,9 +161,9 @@ def main(args):
162161
print('Training time {}'.format(total_time_str))
163162

164163

165-
def parse_args():
164+
def get_args_parser(add_help=True):
166165
import argparse
167-
parser = argparse.ArgumentParser(description='PyTorch Classification Training')
166+
parser = argparse.ArgumentParser(description='PyTorch Quantized Classification Training', add_help=add_help)
168167

169168
parser.add_argument('--data-path',
170169
default='/datasets01/imagenet_full_size/061417/',
@@ -250,11 +249,9 @@ def parse_args():
250249
default='env://',
251250
help='url used to set up distributed training')
252251

253-
args = parser.parse_args()
254-
255-
return args
252+
return parser
256253

257254

258255
if __name__ == "__main__":
259-
args = parse_args()
256+
args = get_args_parser().parse_args()
260257
main(args)

references/detection/train.py

Lines changed: 69 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,75 @@ def get_transform(train, data_augmentation):
5151
return presets.DetectionPresetTrain(data_augmentation) if train else presets.DetectionPresetEval()
5252

5353

54+
def get_args_parser(add_help=True):
55+
import argparse
56+
parser = argparse.ArgumentParser(description='PyTorch Detection Training', add_help=add_help)
57+
58+
parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset')
59+
parser.add_argument('--dataset', default='coco', help='dataset')
60+
parser.add_argument('--model', default='maskrcnn_resnet50_fpn', help='model')
61+
parser.add_argument('--device', default='cuda', help='device')
62+
parser.add_argument('-b', '--batch-size', default=2, type=int,
63+
help='images per gpu, the total batch size is $NGPU x batch_size')
64+
parser.add_argument('--epochs', default=26, type=int, metavar='N',
65+
help='number of total epochs to run')
66+
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
67+
help='number of data loading workers (default: 4)')
68+
parser.add_argument('--lr', default=0.02, type=float,
69+
help='initial learning rate, 0.02 is the default value for training '
70+
'on 8 gpus and 2 images_per_gpu')
71+
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
72+
help='momentum')
73+
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
74+
metavar='W', help='weight decay (default: 1e-4)',
75+
dest='weight_decay')
76+
parser.add_argument('--lr-scheduler', default="multisteplr", help='the lr scheduler (default: multisteplr)')
77+
parser.add_argument('--lr-step-size', default=8, type=int,
78+
help='decrease lr every step-size epochs (multisteplr scheduler only)')
79+
parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int,
80+
help='decrease lr every step-size epochs (multisteplr scheduler only)')
81+
parser.add_argument('--lr-gamma', default=0.1, type=float,
82+
help='decrease lr by a factor of lr-gamma (multisteplr scheduler only)')
83+
parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
84+
parser.add_argument('--output-dir', default='.', help='path where to save')
85+
parser.add_argument('--resume', default='', help='resume from checkpoint')
86+
parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
87+
parser.add_argument('--aspect-ratio-group-factor', default=3, type=int)
88+
parser.add_argument('--rpn-score-thresh', default=None, type=float, help='rpn score threshold for faster-rcnn')
89+
parser.add_argument('--trainable-backbone-layers', default=None, type=int,
90+
help='number of trainable layers of backbone')
91+
parser.add_argument('--data-augmentation', default="hflip", help='data augmentation policy (default: hflip)')
92+
parser.add_argument(
93+
"--sync-bn",
94+
dest="sync_bn",
95+
help="Use sync batch norm",
96+
action="store_true",
97+
)
98+
parser.add_argument(
99+
"--test-only",
100+
dest="test_only",
101+
help="Only test the model",
102+
action="store_true",
103+
)
104+
parser.add_argument(
105+
"--pretrained",
106+
dest="pretrained",
107+
help="Use pre-trained models from the modelzoo",
108+
action="store_true",
109+
)
110+
111+
# distributed training parameters
112+
parser.add_argument('--world-size', default=1, type=int,
113+
help='number of distributed processes')
114+
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
115+
116+
return parser
117+
118+
54119
def main(args):
120+
if args.output_dir:
121+
utils.mkdir(args.output_dir)
122+
55123
utils.init_distributed_mode(args)
56124
print(args)
57125

@@ -155,71 +223,5 @@ def main(args):
155223

156224

157225
if __name__ == "__main__":
158-
import argparse
159-
parser = argparse.ArgumentParser(
160-
description=__doc__)
161-
162-
parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset')
163-
parser.add_argument('--dataset', default='coco', help='dataset')
164-
parser.add_argument('--model', default='maskrcnn_resnet50_fpn', help='model')
165-
parser.add_argument('--device', default='cuda', help='device')
166-
parser.add_argument('-b', '--batch-size', default=2, type=int,
167-
help='images per gpu, the total batch size is $NGPU x batch_size')
168-
parser.add_argument('--epochs', default=26, type=int, metavar='N',
169-
help='number of total epochs to run')
170-
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
171-
help='number of data loading workers (default: 4)')
172-
parser.add_argument('--lr', default=0.02, type=float,
173-
help='initial learning rate, 0.02 is the default value for training '
174-
'on 8 gpus and 2 images_per_gpu')
175-
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
176-
help='momentum')
177-
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
178-
metavar='W', help='weight decay (default: 1e-4)',
179-
dest='weight_decay')
180-
parser.add_argument('--lr-scheduler', default="multisteplr", help='the lr scheduler (default: multisteplr)')
181-
parser.add_argument('--lr-step-size', default=8, type=int,
182-
help='decrease lr every step-size epochs (multisteplr scheduler only)')
183-
parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int,
184-
help='decrease lr every step-size epochs (multisteplr scheduler only)')
185-
parser.add_argument('--lr-gamma', default=0.1, type=float,
186-
help='decrease lr by a factor of lr-gamma (multisteplr scheduler only)')
187-
parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
188-
parser.add_argument('--output-dir', default='.', help='path where to save')
189-
parser.add_argument('--resume', default='', help='resume from checkpoint')
190-
parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
191-
parser.add_argument('--aspect-ratio-group-factor', default=3, type=int)
192-
parser.add_argument('--rpn-score-thresh', default=None, type=float, help='rpn score threshold for faster-rcnn')
193-
parser.add_argument('--trainable-backbone-layers', default=None, type=int,
194-
help='number of trainable layers of backbone')
195-
parser.add_argument('--data-augmentation', default="hflip", help='data augmentation policy (default: hflip)')
196-
parser.add_argument(
197-
"--sync-bn",
198-
dest="sync_bn",
199-
help="Use sync batch norm",
200-
action="store_true",
201-
)
202-
parser.add_argument(
203-
"--test-only",
204-
dest="test_only",
205-
help="Only test the model",
206-
action="store_true",
207-
)
208-
parser.add_argument(
209-
"--pretrained",
210-
dest="pretrained",
211-
help="Use pre-trained models from the modelzoo",
212-
action="store_true",
213-
)
214-
215-
# distributed training parameters
216-
parser.add_argument('--world-size', default=1, type=int,
217-
help='number of distributed processes')
218-
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
219-
220-
args = parser.parse_args()
221-
222-
if args.output_dir:
223-
utils.mkdir(args.output_dir)
224-
226+
args = get_args_parser().parse_args()
225227
main(args)

references/segmentation/train.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -172,9 +172,9 @@ def main(args):
172172
print('Training time {}'.format(total_time_str))
173173

174174

175-
def parse_args():
175+
def get_args_parser(add_help=True):
176176
import argparse
177-
parser = argparse.ArgumentParser(description='PyTorch Segmentation Training')
177+
parser = argparse.ArgumentParser(description='PyTorch Segmentation Training', add_help=add_help)
178178

179179
parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset path')
180180
parser.add_argument('--dataset', default='coco', help='dataset name')
@@ -215,10 +215,9 @@ def parse_args():
215215
help='number of distributed processes')
216216
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
217217

218-
args = parser.parse_args()
219-
return args
218+
return parser
220219

221220

222221
if __name__ == "__main__":
223-
args = parse_args()
222+
args = get_args_parser().parse_args()
224223
main(args)

test/common_utils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
IS_PY39 = sys.version_info.major == 3 and sys.version_info.minor == 9
2424
PY39_SEGFAULT_SKIP_MSG = "Segmentation fault with Python 3.9, see https://github.com/pytorch/vision/issues/3367"
2525
PY39_SKIP = unittest.skipIf(IS_PY39, PY39_SEGFAULT_SKIP_MSG)
26+
IN_CIRCLE_CI = os.getenv("CIRCLECI", False) == 'true'
2627

2728

2829
@contextlib.contextmanager

test/test_models.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
1-
from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state, set_rng_seed
1+
import sys
2+
from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state, set_rng_seed, IN_CIRCLE_CI
23
from collections import OrderedDict
34
from itertools import product
45
import functools
@@ -459,6 +460,9 @@ def test_detection_model_validation(model_name):
459460
@pytest.mark.parametrize('model_name', get_available_video_models())
460461
@pytest.mark.parametrize('dev', _devs)
461462
def test_video_model(model_name, dev):
463+
if IN_CIRCLE_CI and 'cuda' in dev.type and model_name == 'r2plus1d_18' and sys.platform == 'linux':
464+
# FIXME: Failure should be fixed and the test re-activated. See https://github.com/pytorch/vision/issues/3702
465+
pytest.skip('r2plus1d_18 fails on CircleCI linux GPU machines.')
462466
ModelTester()._test_video_model(model_name, dev)
463467

464468

0 commit comments

Comments
 (0)