Commit 7d4c9fd

CI: Fix issues caused by pytest v9 (#2904)
Issues with pyproject.toml

Since the recent pytest v9 release, PEFT's pyproject.toml no longer works. This is because it mixes the new and the old (ini_options) configuration formats, which is no longer allowed. This PR fixes the issue by switching fully to the new format. Note that if a pytest version < 9.0.0 is used, the added pytest options are ignored, so please update to 9.0.0+. Context: https://github.com/pytest-dev/pytest/releases/tag/9.0.0

Issues with unittest.skip

Since pytest v9, unittest.skip raises an error instead of just skipping the corresponding test (see these errors). As of writing this, it is unclear whether this will be fixed: pytest-dev/pytest#13895. To avoid the error, we now use pytest.mark.skip instead of unittest.skip / unittest.skipUnless.

Tangential changes

1. Remove a few unnecessary inheritances from unittest.TestCase.
2. Skip the whole test_gpu_examples.py module if no accelerator is available (faster than checking each class/function individually).
1 parent e82e72a commit 7d4c9fd
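
For illustration, the substitution applied throughout the test files below looks roughly like this (a minimal sketch with a hypothetical condition and test name; the real diffs use a transformers version check): unittest.skipUnless skips when its condition is false, so the pytest.mark.skipif equivalent negates the condition.

import pytest

# Hypothetical condition standing in for the transformers version check used in the diff.
MIN_VERSION_OK = True

# Old style (unittest): skip unless the condition holds.
#   @unittest.skipUnless(MIN_VERSION_OK, "test requires a new enough transformers")
# New style (pytest): skip if the negated condition holds.
@pytest.mark.skipif(not MIN_VERSION_OK, reason="test requires a new enough transformers")
def test_version_dependent_feature():
    assert True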

File tree

4 files changed (+38 −40 lines):

    pyproject.toml
    tests/test_gpu_examples.py
    tests/test_tuners_utils.py
    tests/testing_utils.py

pyproject.toml

Lines changed: 3 additions & 3 deletions
@@ -41,13 +41,13 @@ doctest_optionflags = [
     "NUMBER",
 ]
 
-[tool.pytest.ini_options]
-addopts = "--cov=src/peft --cov-report=term-missing --durations=10"
+addopts = ["--cov=src/peft", "--cov-report=term-missing", "--durations=10"]
+
 markers = [
     "single_gpu_tests: tests that run on a single GPU",
     "multi_gpu_tests: tests that run on multiple GPUs",
     "regression: whether to run regression suite test",
-    "bitsandbytes: select bitsandbytes integration tests"
+    "bitsandbytes: select bitsandbytes integration tests",
 ]
 
 filterwarnings = [
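
As a quick sanity check of the new format, the merged configuration can be inspected with the standard library. A minimal sketch, assuming Python 3.11+ for tomllib and that these options now sit directly under the [tool.pytest] table, as the hunk context above suggests:

import tomllib  # Python 3.11+

with open("pyproject.toml", "rb") as f:
    config = tomllib.load(f)

# With the new format, addopts is a native TOML list of strings rather than one shell-style string.
pytest_cfg = config["tool"]["pytest"]
print(pytest_cfg["addopts"])
print(pytest_cfg["markers"])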

tests/test_gpu_examples.py

Lines changed: 12 additions & 7 deletions
@@ -27,7 +27,7 @@
 import pytest
 import torch
 from accelerate import infer_auto_device_map
-from accelerate.test_utils.testing import run_command
+from accelerate.test_utils.testing import get_backend, run_command
 from accelerate.utils import patch_environment
 from accelerate.utils.imports import is_bf16_available
 from accelerate.utils.memory import clear_device_cache

@@ -108,6 +108,11 @@
 )
 
 
+device, _, _ = get_backend()
+if device == "cpu":
+    pytest.skip(allow_module_level=True, reason="GPU tests require hardware accelerator, got CPU only")
+
+
 # Some tests with multi GPU require specific device maps to ensure that the models are loaded in two devices
 DEVICE_MAP_MAP: dict[str, dict[str, int]] = {
     "facebook/opt-6.7b": {

@@ -3460,9 +3465,9 @@ def test_load_adapter_using_float16_autocast_dtype(self):
 @require_non_xpu
 @require_torch_gpu
 @require_aqlm
-@unittest.skipUnless(
-    version.parse(importlib.metadata.version("transformers")) >= version.parse("4.38.0"),
-    "test requires `transformers>=4.38.0`",
+@pytest.mark.skipif(
+    not version.parse(importlib.metadata.version("transformers")) >= version.parse("4.38.0"),
+    reason="test requires `transformers>=4.38.0`",
 )
 class PeftAqlmGPUTests(unittest.TestCase):
     r"""

@@ -3545,9 +3550,9 @@ def test_causal_lm_training_aqlm(self):
 @require_non_xpu
 @require_torch_gpu
 @require_hqq
-@unittest.skipUnless(
-    version.parse(importlib.metadata.version("transformers")) >= version.parse("4.36.1"),
-    "test requires `transformers>=4.36.1`",
+@pytest.mark.skipif(
+    not version.parse(importlib.metadata.version("transformers")) >= version.parse("4.36.1"),
+    reason="test requires `transformers>=4.36.1`",
 )
 class PeftHqqGPUTests(unittest.TestCase):
     r"""

tests/test_tuners_utils.py

Lines changed: 5 additions & 6 deletions
@@ -16,7 +16,6 @@
 # limitations under the License.
 import dataclasses
 import re
-import unittest
 from copy import deepcopy
 
 import pytest

@@ -179,7 +178,7 @@
 BNB_TEST_CASES = [(x + y) for x in MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES for y in BNB_QUANTIZATIONS]
 
 
-class PeftCustomKwargsTester(unittest.TestCase):
+class PeftCustomKwargsTester:
     r"""
     Test if the PeftModel is instantiated with correct behaviour for custom kwargs. This includes:
     - test if regex matching works correctly

@@ -444,7 +443,7 @@ def __init__(self, bias=True):
         self.sm = nn.LogSoftmax(dim=-1)
 
 
-class TestTargetedModuleNames(unittest.TestCase):
+class TestTargetedModuleNames:
     """Check that the attribute targeted_module_names is correctly set.
 
     This checks LoRA and IA³, but this should be sufficient, testing all other tuners is not necessary.

@@ -492,7 +491,7 @@ def test_realistic_example(self):
         assert model.targeted_module_names == expected
 
 
-class TestTargetedParameterNames(unittest.TestCase):
+class TestTargetedParameterNames:
     """Check that the attribute targeted_parameter_names (via target_parameters) is correctly set.
 
     This is only implemented for LoRA. Regex matching is currently not implemented.

@@ -520,7 +519,7 @@ def test_realistic_example(self):
         assert model.targeted_parameter_names == expected
 
 
-class TestExcludedModuleNames(unittest.TestCase):
+class TestExcludedModuleNames:
     """Check that the attribute exclude_module is correctly set.
 
     This checks LoRA and IA³, but this should be sufficient, testing all other tuners is not necessary.

@@ -1527,7 +1526,7 @@ class ModelWithNoConfig(nn.Module):
     pass
 
 
-class TestBaseTunerGetModelConfig(unittest.TestCase):
+class TestBaseTunerGetModelConfig:
     def test_get_model_config_use_to_dict(self):
         config = BaseTuner.get_model_config(ModelWithConfig())
         assert config == MockModelConfig.config
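
Dropping the unittest.TestCase base class works because pytest collects test classes by naming convention; a minimal sketch with a hypothetical class, not taken from the diff:

# Hypothetical example: pytest collects classes named Test* (without an __init__) and
# methods named test_*, so no unittest.TestCase base is needed, and plain `assert`
# statements replace self.assertEqual and friends.
class TestWithoutUnittest:
    def test_plain_assert(self):
        result = 1 + 1
        assert result == 2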

tests/testing_utils.py

Lines changed: 18 additions & 24 deletions
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
-import unittest
 from contextlib import contextmanager
 from functools import lru_cache, wraps
 from unittest import mock

@@ -55,44 +54,40 @@ def require_non_cpu(test_case):
     Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no
     hardware accelerator available.
     """
-    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
+    return pytest.mark.skipif(torch_device == "cpu", reason="test requires a hardware accelerator")(test_case)
 
 
 def require_non_xpu(test_case):
     """
     Decorator marking a test that should be skipped for XPU.
     """
-    return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)
+    return pytest.mark.skipif(torch_device == "xpu", reason="test requires a non-XPU")(test_case)
 
 
 def require_torch_gpu(test_case):
     """
     Decorator marking a test that requires a GPU. Will be skipped when no GPU is available.
     """
-    if not torch.cuda.is_available():
-        return unittest.skip("test requires GPU")(test_case)
-    else:
-        return test_case
+    return pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU")(test_case)
 
 
 def require_torch_multi_gpu(test_case):
     """
     Decorator marking a test that requires multiple GPUs. Will be skipped when less than 2 GPUs are available.
     """
-    if not torch.cuda.is_available() or torch.cuda.device_count() < 2:
-        return unittest.skip("test requires multiple GPUs")(test_case)
-    else:
-        return test_case
+    multi_cuda_unavailable = not torch.cuda.is_available() or (device_count < 2)
+    return pytest.mark.skipif(multi_cuda_unavailable, reason="test requires multiple GPUs")(test_case)
 
 
 def require_torch_multi_accelerator(test_case):
     """
     Decorator marking a test that requires multiple hardware accelerators. These tests are skipped on a machine without
     multiple accelerators.
     """
-    return unittest.skipUnless(
-        torch_device != "cpu" and device_count > 1, "test requires multiple hardware accelerators"
-    )(test_case)
+    multi_device_unavailable = (torch_device == "cpu") or (device_count < 2)
+    return pytest.mark.skipif(multi_device_unavailable, reason="test requires multiple hardware accelerators")(
+        test_case
+    )
 
 
 def require_bitsandbytes(test_case):

@@ -112,58 +107,57 @@ def require_auto_gptq(test_case):
     """
     Decorator marking a test that requires auto-gptq. These tests are skipped when auto-gptq isn't installed.
     """
-    return unittest.skipUnless(is_gptqmodel_available() or is_auto_gptq_available(), "test requires auto-gptq")(
-        test_case
-    )
+    is_gptq_avaiable = is_gptqmodel_available() or is_auto_gptq_available()
+    return pytest.mark.skipif(not is_gptq_avaiable, reason="test requires auto-gptq")(test_case)
 
 
 def require_gptqmodel(test_case):
     """
     Decorator marking a test that requires gptqmodel. These tests are skipped when gptqmodel isn't installed.
     """
-    return unittest.skipUnless(is_gptqmodel_available(), "test requires gptqmodel")(test_case)
+    return pytest.mark.skipif(not is_gptqmodel_available(), reason="test requires gptqmodel")(test_case)
 
 
 def require_aqlm(test_case):
     """
     Decorator marking a test that requires aqlm. These tests are skipped when aqlm isn't installed.
     """
-    return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
+    return pytest.mark.skipif(not is_aqlm_available(), reason="test requires aqlm")(test_case)
 
 
 def require_hqq(test_case):
     """
     Decorator marking a test that requires aqlm. These tests are skipped when aqlm isn't installed.
     """
-    return unittest.skipUnless(is_hqq_available(), "test requires hqq")(test_case)
+    return pytest.mark.skipif(not is_hqq_available(), reason="test requires hqq")(test_case)
 
 
 def require_auto_awq(test_case):
     """
     Decorator marking a test that requires auto-awq. These tests are skipped when auto-awq isn't installed.
     """
-    return unittest.skipUnless(is_auto_awq_available(), "test requires auto-awq")(test_case)
+    return pytest.mark.skipif(not is_auto_awq_available(), reason="test requires auto-awq")(test_case)
 
 
 def require_eetq(test_case):
     """
     Decorator marking a test that requires eetq. These tests are skipped when eetq isn't installed.
     """
-    return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
+    return pytest.mark.skipif(not is_eetq_available(), reason="test requires eetq")(test_case)
 
 
 def require_optimum(test_case):
     """
     Decorator marking a test that requires optimum. These tests are skipped when optimum isn't installed.
     """
-    return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
+    return pytest.mark.skipif(not is_optimum_available(), reason="test requires optimum")(test_case)
 
 
 def require_torchao(test_case):
     """
     Decorator marking a test that requires torchao. These tests are skipped when torchao isn't installed.
     """
-    return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
+    return pytest.mark.skipif(not is_torchao_available(), reason="test requires torchao")(test_case)
 
 
 def require_deterministic_for_xpu(test_case):
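
All of the require_* helpers above follow the same pattern: build a pytest.mark.skipif marker and apply it to the decorated test or class. A minimal sketch with a hypothetical optional dependency (is_foo_available and require_foo are illustrative names, not part of PEFT):

import importlib.util

import pytest


def is_foo_available() -> bool:
    # Detect the (hypothetical) optional dependency without importing it.
    return importlib.util.find_spec("foo") is not None


def require_foo(test_case):
    """Skip the decorated test or test class when the optional `foo` package is missing."""
    return pytest.mark.skipif(not is_foo_available(), reason="test requires foo")(test_case)


@require_foo
def test_uses_foo():
    import foo  # only runs when foo is installed

    assert foo is not None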
