
Commit 998eeaf

[CI/Build] Bump test transformers version (#10106)
Signed-off-by: Isotr0py <[email protected]>
Signed-off-by: DarkLight1337 <[email protected]>
Co-authored-by: DarkLight1337 <[email protected]>
1 parent: 571da8f

File tree: 5 files changed, +3 −35 lines

requirements-test.txt

Lines changed: 1 addition & 1 deletion

@@ -550,7 +550,7 @@ tqdm==4.66.6
     #   transformers
 tqdm-multiprocess==0.0.11
     # via lm-eval
-transformers==4.45.2
+transformers==4.46.3
     # via
     #   lm-eval
     #   peft
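This pins the test suite to transformers 4.46.3. A minimal sketch (not part of the commit, and the file name is illustrative) of sanity-checking that an environment matches the new pin before running the suite; packaging is assumed available, as pip and pytest already depend on it:

    # check_transformers_pin.py - verify the installed transformers release.
    from importlib.metadata import version
    from packaging.version import Version

    installed = Version(version("transformers"))
    assert installed == Version("4.46.3"), (
        f"requirements-test.txt pins transformers==4.46.3, found {installed}")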

tests/models/decoder_only/vision_language/test_models.py

Lines changed: 1 addition & 24 deletions

@@ -6,7 +6,6 @@
 from typing import Type
 
 import pytest
-import transformers
 from transformers import AutoModelForVision2Seq
 from transformers.utils import is_flash_attn_2_available
 
@@ -187,12 +186,6 @@
         comparator=check_outputs_equal,
         max_tokens=8,
         dtype="bfloat16",
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken in HF, see huggingface/transformers#34379"
-            ),
-        ]
     ),
     "fuyu": VLMTestInfo(
         models=["adept/fuyu-8b"],
@@ -243,13 +236,7 @@
         max_model_len=8192,
         max_num_seqs=2,
         auto_cls=AutoModelForVision2Seq,
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.0",
-                reason="Model introduced in HF >= 4.46.0"
-            ),
-            large_gpu_mark(min_gb=48),
-        ],
+        marks=[large_gpu_mark(min_gb=48)],
     ),
     "intern_vl": VLMTestInfo(
         models=[
@@ -318,12 +305,6 @@
         auto_cls=AutoModelForVision2Seq,
         vllm_output_post_proc=model_utils.llava_video_vllm_to_hf_output,
         image_sizes=[((1669, 2560), (2560, 1669), (183, 488), (488, 183))],
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken with changes in transformers 4.46"
-            )
-        ],
     ),
     "minicpmv_25": VLMTestInfo(
         models=["openbmb/MiniCPM-Llama3-V-2_5"],
@@ -404,10 +385,6 @@
             cuda_device_count_stateless() < 2,
             reason="Need at least 2 GPUs to run the test.",
         ),
-        pytest.mark.skipif(
-            transformers.__version__ < "4.46.2",
-            reason="Model broken in HF, see huggingface/transformers#34379"
-        )
     ],
     **COMMON_BROADCAST_SETTINGS  # type: ignore
 ),
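All of the removed marks follow the same pattern: gate a test on the installed transformers release. Note that they compare transformers.__version__ as a plain string, which happens to work for these values but misorders some releases (for example, "4.9" < "4.46" is False as strings yet true as versions). A hedged sketch of the same gate written with packaging.version, which this test environment already provides; the reusable mark name and the test are illustrative:

    import pytest
    import transformers
    from packaging.version import Version

    # Reusable mark: skip unless transformers >= 4.46.2 is installed.
    requires_hf_4_46_2 = pytest.mark.skipif(
        Version(transformers.__version__) < Version("4.46.2"),
        reason="Model broken in HF, see huggingface/transformers#34379",
    )

    @requires_hf_4_46_2
    def test_example_model():  # illustrative test, not from the commit
        ...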

tests/models/decoder_only/vision_language/test_pixtral.py

Lines changed: 1 addition & 1 deletion

@@ -228,7 +228,7 @@ def test_model_engine(vllm_runner, model: str, dtype: str) -> None:
                          name_1="output")
 
 
-@large_gpu_test(min_gb=24)
+@large_gpu_test(min_gb=48)
 @pytest.mark.parametrize(
     "prompt,expected_ranges",
     [(_create_engine_inputs_hf(IMG_URLS[:1]), [{
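large_gpu_test comes from vLLM's own test utilities; raising min_gb from 24 to 48 makes the test skip on smaller devices. A hypothetical sketch of how such a decorator can be built from standard pytest and torch APIs (an illustration, not vLLM's actual implementation):

    import pytest
    import torch

    def large_gpu_test(*, min_gb: int):
        """Return a mark that skips unless GPU 0 has at least min_gb of memory."""
        if not torch.cuda.is_available():
            return pytest.mark.skip(reason="CUDA is not available")
        total_gb = torch.cuda.get_device_properties(0).total_memory / 2**30
        return pytest.mark.skipif(
            total_gb < min_gb,
            reason=f"Need at least {min_gb} GiB of GPU memory, found {total_gb:.0f} GiB",
        )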

tests/models/embedding/vision_language/test_llava_next.py

Lines changed: 0 additions & 4 deletions

@@ -2,7 +2,6 @@
 
 import pytest
 import torch.nn.functional as F
-import transformers
 from transformers import AutoModelForVision2Seq
 
 from ....conftest import IMAGE_ASSETS, HfRunner, PromptImageInput, VllmRunner
@@ -86,9 +85,6 @@ def _run_test(
     )
 
 
-@pytest.mark.skipif(transformers.__version__.startswith("4.46"),
-                    reason="Model broken with changes in transformers 4.46")
-@pytest.mark.core_model
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["half"])
 def test_models_text(
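Removing this guard is required by the version bump, not just cleanup: startswith("4.46") matches every 4.46.x release, including the newly pinned 4.46.3, so the mark left in place would have kept skipping the test. A two-line demonstration:

    # The prefix check matches the new pin, so the guard had to go.
    assert "4.46.3".startswith("4.46")      # True for every 4.46.x release
    assert not "4.45.2".startswith("4.46")  # the old pin did not match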

tests/models/test_initialization.py

Lines changed: 0 additions & 5 deletions

@@ -1,7 +1,6 @@
 from unittest.mock import patch
 
 import pytest
-import transformers
 from transformers import PretrainedConfig
 
 from vllm import LLM
@@ -11,10 +10,6 @@
 
 @pytest.mark.parametrize("model_arch", HF_EXAMPLE_MODELS.get_supported_archs())
 def test_can_initialize(model_arch):
-    if (model_arch in {"Idefics3ForConditionalGeneration", "GlmForCausalLM"}
-            and transformers.__version__ < "4.46.0"):
-        pytest.skip(reason="Model introduced in HF >= 4.46.0")
-
     model_info = HF_EXAMPLE_MODELS.get_hf_info(model_arch)
     if not model_info.is_available_online:
         pytest.skip("Model is not available online")
