diff --git a/.jenkins/build.sh b/.jenkins/build.sh
index 8786859d7d..87fb31ee45 100755
--- a/.jenkins/build.sh
+++ b/.jenkins/build.sh
@@ -26,8 +26,8 @@ sudo apt-get install -y pandoc
 # sudo pip3 install torch==2.6.0 torchvision --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
 # sudo pip uninstall -y fbgemm-gpu torchrec
 # sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
-sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict
-pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
+# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict
+# pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
 #sudo pip uninstall -y fbgemm-gpu
 # Install two language tokenizers for Translation with TorchText tutorial
 python -m spacy download en_core_web_sm
diff --git a/intermediate_source/torch_export_tutorial.py b/intermediate_source/torch_export_tutorial.py
index 3ca6d09a52..20b1b4023e 100644
--- a/intermediate_source/torch_export_tutorial.py
+++ b/intermediate_source/torch_export_tutorial.py
@@ -995,7 +995,7 @@ def forward(self, x):
 # with torch.no_grad():
 #     pt2_path = torch._inductor.aoti_compile_and_package(ep)
 #
-# # Load and run the .so file in Python.
+# # Load and run the .pt2 file in Python.
 # # To load and run it in a C++ environment, see:
 # # https://pytorch.org/docs/main/torch.compiler_aot_inductor.html
 # aoti_compiled = torch._inductor.aoti_load_package(pt2_path)
diff --git a/recipes_source/torch_export_aoti_python.py b/recipes_source/torch_export_aoti_python.py
index c0cbb7e280..ff311f071e 100644
--- a/recipes_source/torch_export_aoti_python.py
+++ b/recipes_source/torch_export_aoti_python.py
@@ -176,7 +176,7 @@
 model_path = os.path.join(os.getcwd(), "resnet18.pt2")
 
 compiled_model = torch._inductor.aoti_load_package(model_path)
-example_inputs = (torch.randn(2, 3, 224, 224, device=device),)
+example_inputs = torch.randn(2, 3, 224, 224, device=device)
 
 with torch.inference_mode():
     output = compiled_model(example_inputs)
@@ -238,11 +238,11 @@ def timed(fn):
 
 torch._dynamo.reset()
 
-model = torch._inductor.aoti_load_package(model_path)
-example_inputs = (torch.randn(1, 3, 224, 224, device=device),)
+compiled_model = torch._inductor.aoti_load_package(model_path)
+example_inputs = torch.randn(1, 3, 224, 224, device=device)
 
 with torch.inference_mode():
-    _, time_taken = timed(lambda: model(example_inputs))
+    _, time_taken = timed(lambda: compiled_model(example_inputs))
 
 print(f"Time taken for first inference for AOTInductor is {time_taken:.2f} ms")
 