diff --git a/advanced_source/torch_script_custom_ops.rst b/advanced_source/torch_script_custom_ops.rst
index 1c8d722952e..66f18bd6947 100644
--- a/advanced_source/torch_script_custom_ops.rst
+++ b/advanced_source/torch_script_custom_ops.rst
@@ -23,7 +23,7 @@ Python and in their serialized form directly in C++.
 The following paragraphs give an example of writing a TorchScript custom op to
 call into `OpenCV `_, a computer vision library written
 in C++. We will discuss how to work with tensors in C++, how to efficiently
-convert them to third party tensor formats (in this case, OpenCV ``Mat``s), how
+convert them to third party tensor formats (in this case, OpenCV ``Mat`` s), how
 to register your operator with the TorchScript runtime and finally how to
 compile the operator and use it in Python and C++.

@@ -1018,7 +1018,7 @@ expects from a module), this route can be slightly quirky. That said, all you
 need is a ``setup.py`` file in place of the ``CMakeLists.txt`` which looks like
 this:

-.. code-block::
+.. code-block:: python

   from setuptools import setup
   from torch.utils.cpp_extension import BuildExtension, CppExtension
@@ -1081,7 +1081,7 @@ This will produce a shared library called ``warp_perspective.so``, which we can
 pass to ``torch.ops.load_library`` as we did earlier to make our operator
 visible to TorchScript:

-.. code-block::
+.. code-block:: python

   >>> import torch
   >>> torch.ops.load_library("warp_perspective.so")
diff --git a/beginner_source/blitz/cifar10_tutorial.py b/beginner_source/blitz/cifar10_tutorial.py
index 1fe8343a792..f105cd04fb7 100644
--- a/beginner_source/blitz/cifar10_tutorial.py
+++ b/beginner_source/blitz/cifar10_tutorial.py
@@ -108,7 +108,7 @@ def imshow(img):

 ########################################################################
 # 2. Define a Convolutional Neural Network
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 # Copy the neural network from the Neural Networks section before and modify it to
 # take 3-channel images (instead of 1-channel images as it was defined).
diff --git a/beginner_source/nn_tutorial.py b/beginner_source/nn_tutorial.py
index adbd56d059e..d9a1c3edada 100644
--- a/beginner_source/nn_tutorial.py
+++ b/beginner_source/nn_tutorial.py
@@ -322,6 +322,7 @@ def forward(self, xb):
 # Previously for our training loop we had to update the values for each parameter
 # by name, and manually zero out the grads for each parameter separately, like this:
 # ::
+#
 #       with torch.no_grad():
 #           weights -= weights.grad * lr
 #           bias -= bias.grad * lr
@@ -334,6 +335,7 @@ def forward(self, xb):
 # and less prone to the error of forgetting some of our parameters, particularly
 # if we had a more complicated model:
 # ::
+#
 #       with torch.no_grad():
 #           for p in model.parameters(): p -= p.grad * lr
 #           model.zero_grad()
@@ -408,12 +410,14 @@ def forward(self, xb):
 #
 # This will let us replace our previous manually coded optimization step:
 # ::
+#
 #       with torch.no_grad():
 #           for p in model.parameters(): p -= p.grad * lr
 #           model.zero_grad()
 #
 # and instead use just:
 # ::
+#
 #       opt.step()
 #       opt.zero_grad()
 #
@@ -476,12 +480,14 @@ def get_model():
 ###############################################################################
 # Previously, we had to iterate through minibatches of x and y values separately:
 # ::
+#
 #       xb = x_train[start_i:end_i]
 #       yb = y_train[start_i:end_i]
 #
 #
 # Now, we can do these two steps together:
 # ::
+#
 #       xb,yb = train_ds[i*bs : i*bs+bs]
 #

@@ -516,12 +522,14 @@ def get_model():
 ###############################################################################
 # Previously, our loop iterated over batches (xb, yb) like this:
 # ::
+#
 #       for i in range((n-1)//bs + 1):
 #           xb,yb = train_ds[i*bs : i*bs+bs]
 #           pred = model(xb)
 #
 # Now, our loop is much cleaner, as (xb, yb) are loaded automatically from the data loader:
 # ::
+#
 #       for xb,yb in train_dl:
 #           pred = model(xb)
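For context, the cleaner training loop that these ``nn_tutorial.py`` hunks describe looks roughly like the sketch below. It is a minimal, self-contained approximation rather than the tutorial's exact code: the random toy data, the ``nn.Linear(784, 10)`` stand-in model, and the hyperparameters (``bs``, ``lr``, ``epochs``) are placeholder assumptions, while ``train_dl``, ``opt``, ``loss_func``, ``opt.step()`` and ``opt.zero_grad()`` follow the snippets shown in the diff.

    import torch
    import torch.nn.functional as F
    from torch import nn, optim
    from torch.utils.data import TensorDataset, DataLoader

    # Toy stand-ins for the tutorial's MNIST x_train / y_train tensors.
    x_train = torch.randn(640, 784)
    y_train = torch.randint(0, 10, (640,))

    bs = 64      # batch size (placeholder)
    lr = 0.1     # learning rate (placeholder)
    epochs = 2   # number of passes over the data (placeholder)

    train_ds = TensorDataset(x_train, y_train)
    train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)

    model = nn.Linear(784, 10)                  # stand-in for the tutorial's model
    loss_func = F.cross_entropy
    opt = optim.SGD(model.parameters(), lr=lr)

    for epoch in range(epochs):
        for xb, yb in train_dl:                 # batches come straight from the DataLoader
            pred = model(xb)
            loss = loss_func(pred, yb)

            loss.backward()
            opt.step()                          # replaces the manual parameter update
            opt.zero_grad()                     # replaces manually zeroing each .grad

Using ``DataLoader`` plus ``optimizer.step()`` / ``optimizer.zero_grad()`` is what lets the diff delete the hand-written indexing and the explicit ``with torch.no_grad()`` parameter updates shown in the "Previously" snippets.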