
Commit 257d18a

[xpu][test] Refine the pytest command for Intel XPU (#3355)
* refine the pytest command
* add dependency
* update
* update device
* update skip
1 parent 4f5bc7a · commit 257d18a

File tree: 3 files changed (+11, −13 lines)


.github/scripts/ci_test_xpu.sh

Lines changed: 8 additions & 13 deletions
@@ -12,16 +12,11 @@ cd torchao && pip install . --no-build-isolation && cd ..
 
 python3 -c "import torch; import torchao; print(f'Torch version: {torch.__version__}')"
 
-pip install pytest expecttest parameterized accelerate hf_transfer 'modelscope!=1.15.0'
-
-pytest -v -s torchao/test/quantization/
-
-pytest -v -s torchao/test/dtypes/
-
-pytest -v -s torchao/test/float8/
-
-pytest -v -s torchao/test/integration/test_integration.py
-
-pytest -v -s torchao/test/prototype/
-
-pytest -v -s torchao/test/test_ao_models.py
+pip install pytest expecttest parameterized accelerate hf_transfer 'modelscope!=1.15.0' transformers tabulate fire
+
+pytest -v -s torchao/test/quantization/ \
+    torchao/test/dtypes/ \
+    torchao/test/float8/ \
+    torchao/test/integration/test_integration.py \
+    torchao/test/prototype/ \
+    torchao/test/test_ao_models.py
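
Collapsing the six pytest invocations into one command means the whole XPU suite runs as a single session: one collection pass, one summary, and one exit code covering every path (previously, unless the script set `set -e`, only the last pytest command's status decided the CI step). Purely as an illustrative sketch (the CI script itself stays shell), the same argument list can be driven through pytest's Python entry point:

import sys

import pytest

# Same paths the shell command passes; pytest.main() returns a single
# exit code for the combined run, mirroring the one-command CI step.
exit_code = pytest.main(
    [
        "-v",
        "-s",
        "torchao/test/quantization/",
        "torchao/test/dtypes/",
        "torchao/test/float8/",
        "torchao/test/integration/test_integration.py",
        "torchao/test/prototype/",
        "torchao/test/test_ao_models.py",
    ]
)
sys.exit(exit_code)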

test/dtypes/test_nf4.py

Lines changed: 1 addition & 0 deletions
@@ -756,6 +756,7 @@ def world_size(self) -> int:
         return 2
 
     @skip_if_lt_x_gpu(2)
+    @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
     def test_comm(self):
         self.run_subtests(
             {"input_size": [512, 2048]},

test/prototype/test_quantized_training.py

Lines changed: 2 additions & 0 deletions
@@ -296,6 +296,7 @@ def world_size(self) -> int:
         return _FSDP_WORLD_SIZE
 
     @skip_if_lt_x_gpu(_FSDP_WORLD_SIZE)
+    @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
     def test_fsdp2_correctness(self):
         mp_policy = MixedPrecisionPolicy()
 
@@ -386,6 +387,7 @@ def _run_subtest(self, args):
         )
 
     @skip_if_lt_x_gpu(_FSDP_WORLD_SIZE)
+    @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
     def test_precompute_bitnet_scale(self):
         from torchao.prototype.quantized_training.bitnet import (
             get_bitnet_scale,
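
pytest.mark.skipif is the pytest-native counterpart of the unittest guard above; the decorator takes a boolean condition plus a reason string that shows up in the test report, which is what the XPU CI log will display for these CUDA-only FSDP tests. A short hypothetical example of the decorator in isolation:

import pytest
import torch


# With a plain boolean condition, the skip decision is made at import time.
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_cuda_matmul():
    a = torch.randn(8, 8, device="cuda")
    assert (a @ a).shape == (8, 8)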
