Skip to content

Commit c4684d2

Browse files
committed
Nightly: improve the test install so dependencies are pulled in correctly, and skip CUDA tests on CPU-only boxes
1 parent e2e4542 commit c4684d2

File tree

3 files changed

+30
-25
lines changed

3 files changed

+30
-25
lines changed

README.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,3 +10,7 @@ Folders:
1010
- **windows** : scripts to build Windows wheels
1111
- **cron** : scripts to drive all of the above scripts across multiple configurations together
1212
- **analytics** : scripts to pull wheel download count from our AWS s3 logs
13+
14+
## Testing
15+
16+
In order to test builds triggered by the PyTorch repo's GitHub Actions, see [these instructions](https://github.com/pytorch/pytorch/blob/master/.github/scripts/README.md#testing-pytorchbuilder-changes)

conda/build_pytorch.sh

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -388,7 +388,17 @@ for py_ver in "${DESIRED_PYTHON[@]}"; do
388388

389389
# Install the built package and run tests, unless it's for mac cross compiled arm64
390390
if [[ -z "$CROSS_COMPILE_ARM64" ]]; then
391-
conda install -y "$built_package"
391+
# Install the package as if from local repo instead of tar.bz2 directly in order
392+
# to trigger runtime dependency installation. See https://github.com/conda/conda/issues/1884
393+
# Notes:
394+
# - pytorch-nightly is included to install torchtriton
395+
# - nvidia is included for cuda builds, there's no harm in listing the channel for cpu builds
396+
if [[ "$OSTYPE" == "msys" ]]; then
397+
local_channel="$(pwd -W)/$output_folder"
398+
else
399+
local_channel="$(pwd)/$output_folder"
400+
fi
401+
conda install -y -c "file://$local_channel" pytorch==$PYTORCH_BUILD_VERSION -c pytorch -c numba/label/dev -c pytorch-nightly -c nvidia
392402

393403
echo "$(date) :: Running tests"
394404
pushd "$pytorch_rootdir"

run_tests.sh

Lines changed: 15 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -72,21 +72,6 @@ fi
7272

7373
# Environment initialization
7474
if [[ "$package_type" == conda || "$(uname)" == Darwin ]]; then
75-
# Why are there two different ways to install dependencies after installing an offline package?
76-
# The "cpu" conda package for pytorch doesn't actually depend on "cpuonly" which means that
77-
# when we attempt to update dependencies using "conda update --all" it will attempt to install
78-
# whatever "cudatoolkit" your current computer relies on (which is sometimes none). When conda
79-
# tries to install this cudatoolkit that correlates with your current hardware it will also
80-
# overwrite the currently installed "local" pytorch package meaning you aren't actually testing
81-
# the right package.
82-
# TODO (maybe): Make the "cpu" package of pytorch depend on "cpuonly"
83-
if [[ "$cuda_ver" = 'cpu' ]]; then
84-
# Installing cpuonly will also install dependencies as well
85-
retry conda install -y -c pytorch cpuonly
86-
else
87-
# Install dependencies from installing the pytorch conda package offline
88-
retry conda update -yq --all -c defaults -c pytorch -c numba/label/dev
89-
fi
9075
# Install the testing dependencies
9176
retry conda install -yq future hypothesis ${NUMPY_PACKAGE} ${PROTOBUF_PACKAGE} pytest setuptools six typing_extensions pyyaml
9277
else
@@ -140,15 +125,21 @@ python -c "import torch; exit(0 if torch.__version__ == '$expected_version' else
140125

141126
# Test that CUDA builds are setup correctly
142127
if [[ "$cuda_ver" != 'cpu' ]]; then
143-
# Test CUDA archs
144-
echo "Checking that CUDA archs are setup correctly"
145-
timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
146-
147-
# These have to run after CUDA is initialized
148-
echo "Checking that magma is available"
149-
python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
150-
echo "Checking that CuDNN is available"
151-
python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
128+
cuda_installed=1
129+
nvidia-smi || cuda_installed=0
130+
if [[ "$cuda_installed" == 0 ]]; then
131+
echo "Skip CUDA tests for machines without a Nvidia GPU card"
132+
else
133+
# Test CUDA archs
134+
echo "Checking that CUDA archs are setup correctly"
135+
timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
136+
137+
# These have to run after CUDA is initialized
138+
echo "Checking that magma is available"
139+
python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
140+
echo "Checking that CuDNN is available"
141+
python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
142+
fi
152143
fi
153144

154145
# Check that OpenBlas is not linked to on Macs

0 commit comments

Comments
 (0)