@@ -6,7 +6,6 @@
 from typing import Type
 
 import pytest
-import transformers
 from transformers import AutoModelForVision2Seq
 from transformers.utils import is_flash_attn_2_available
 
@@ -187,12 +186,6 @@
         comparator=check_outputs_equal,
         max_tokens=8,
         dtype="bfloat16",
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken in HF, see huggingface/transformers#34379"
-            ),
-        ]
     ),
     "fuyu": VLMTestInfo(
         models=["adept/fuyu-8b"],
@@ -243,13 +236,7 @@
         max_model_len=8192,
         max_num_seqs=2,
         auto_cls=AutoModelForVision2Seq,
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.0",
-                reason="Model introduced in HF >= 4.46.0"
-            ),
-            large_gpu_mark(min_gb=48),
-        ],
+        marks=[large_gpu_mark(min_gb=48)],
     ),
     "intern_vl": VLMTestInfo(
         models=[
@@ -318,12 +305,6 @@
         auto_cls=AutoModelForVision2Seq,
         vllm_output_post_proc=model_utils.llava_video_vllm_to_hf_output,
         image_sizes=[((1669, 2560), (2560, 1669), (183, 488), (488, 183))],
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken with changes in transformers 4.46"
-            )
-        ],
     ),
     "minicpmv_25": VLMTestInfo(
         models=["openbmb/MiniCPM-Llama3-V-2_5"],
@@ -404,10 +385,6 @@
                 cuda_device_count_stateless() < 2,
                 reason="Need at least 2 GPUs to run the test.",
             ),
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken in HF, see huggingface/transformers#34379"
-            )
         ],
         **COMMON_BROADCAST_SETTINGS  # type: ignore
     ),
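Note: the removed marks compared transformers.__version__ against a version literal with "<", which is a lexicographic string comparison (e.g. "4.9.0" sorts after "4.46.2" even though 4.9.0 is the older release). If a version gate like this is ever needed again, a minimal sketch of a more robust form is below; it is not part of this change, the requires_transformers_4_46_2 name is hypothetical, and only the reason text is taken from the marks removed above.

# Minimal sketch, not part of the diff above: a reusable version gate
# that parses versions semantically instead of comparing raw strings.
import pytest
import transformers
from packaging.version import Version

# Hypothetical helper; reason text copied from the removed marks.
requires_transformers_4_46_2 = pytest.mark.skipif(
    Version(transformers.__version__) < Version("4.46.2"),
    reason="Model broken in HF, see huggingface/transformers#34379",
)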