From f1385172ee1b0c4b6728d038e4873930f5163c97 Mon Sep 17 00:00:00 2001
From: Sayantan Das <36279638+ucalyptus@users.noreply.github.com>
Date: Wed, 26 Aug 2020 20:46:26 +0530
Subject: [PATCH] Corrected torchvision.models.resnet50() spelling

The spelling mistake led to errors for beginners.
---
 intermediate_source/model_parallel_tutorial.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/intermediate_source/model_parallel_tutorial.py b/intermediate_source/model_parallel_tutorial.py
index 515b689301a..df43f6fa5f9 100644
--- a/intermediate_source/model_parallel_tutorial.py
+++ b/intermediate_source/model_parallel_tutorial.py
@@ -86,7 +86,7 @@ def forward(self, x):
 #
 # It is also possible to run an existing single-GPU module on multiple GPUs
 # with just a few lines of changes. The code below shows how to decompose
-# ``torchvision.models.reset50()`` to two GPUs. The idea is to inherit from
+# ``torchvision.models.resnet50()`` to two GPUs. The idea is to inherit from
 # the existing ``ResNet`` module, and split the layers to two GPUs during
 # construction. Then, override the ``forward`` method to stitch two
 # sub-networks by moving the intermediate outputs accordingly.
@@ -136,7 +136,7 @@ def forward(self, x):
 #
 # Let us run an experiment to get a more quantitative view of the execution
 # time. In this experiment, we train ``ModelParallelResNet50`` and the existing
-# ``torchvision.models.reset50()`` by running random inputs and labels through
+# ``torchvision.models.resnet50()`` by running random inputs and labels through
 # them. After the training, the models will not produce any useful predictions,
 # but we can get a reasonable understanding of the execution times.
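
For context, the ``ModelParallelResNet50`` class mentioned in the patched comments
follows the recipe described there: subclass ``ResNet``, place the two halves of the
layer stack on different devices at construction time, and move the intermediate
activation between devices in ``forward``. The sketch below is a minimal illustration
of that idea, not necessarily the tutorial's exact code; it assumes two CUDA devices
named 'cuda:0' and 'cuda:1', and splits the resnet50 stack between ``layer2`` and
``layer3``.

    import torch.nn as nn
    from torchvision.models.resnet import ResNet, Bottleneck

    class ModelParallelResNet50(ResNet):
        def __init__(self, *args, **kwargs):
            # Bottleneck with [3, 4, 6, 3] blocks is the resnet50 configuration.
            super(ModelParallelResNet50, self).__init__(
                Bottleneck, [3, 4, 6, 3], *args, **kwargs)

            # First half of the network lives on GPU 0.
            self.seq1 = nn.Sequential(
                self.conv1, self.bn1, self.relu, self.maxpool,
                self.layer1, self.layer2,
            ).to('cuda:0')

            # Second half, plus the classifier head, lives on GPU 1.
            self.seq2 = nn.Sequential(
                self.layer3, self.layer4, self.avgpool,
            ).to('cuda:1')
            self.fc.to('cuda:1')

        def forward(self, x):
            # Stitch the sub-networks: run seq1 on GPU 0, copy the
            # intermediate output to GPU 1, then run seq2 and fc there.
            x = self.seq2(self.seq1(x).to('cuda:1'))
            return self.fc(x.view(x.size(0), -1))

Inputs would be fed on 'cuda:0' (e.g. ``model(images.to('cuda:0'))``) and labels
compared on 'cuda:1', since that is where the output of ``fc`` resides.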