Skip to content

Commit c094448

Browse files
committed
Fix llava_hf image tokens number issue
1 parent 64f07e4 commit c094448

File tree

1 file changed

+5
-3
lines changed

1 file changed

+5
-3
lines changed

lmms_eval/models/llava_hf.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,10 +31,10 @@ class LlavaHf(lmms):

     Example usage:

-    accelerate launch --num_processes=8 -m lmms_eval \
+    accelerate launch --num_processes=8 --main_process_port 12345 -m lmms_eval \
         --model llava_hf \
         --model_args pretrained=llava-hf/llava-1.5-7b-hf \
-        --tasks mme \
+        --tasks seedbench \
         --batch_size 1 \
         --output_path ./logs/ \
         --log_samples
@@ -278,7 +278,9 @@ def _collate(x):

             # Some benchmarks like MME do not contain image tokens, so we prepend them to the prompt.
             if DEFAULT_IMAGE_TOKEN not in context:
-                context = f"{DEFAULT_IMAGE_TOKEN}\n{context}"
+                image_tokens = [DEFAULT_IMAGE_TOKEN] * len(visuals)
+                image_tokens = " ".join(image_tokens)
+                context = f"{image_tokens}\n{context}"
             # Apply chat template
             messages = [{"role": "user", "content": context}]
             if self.chat_template is not None:

0 commit comments

Comments (0)