Skip to content

Commit bc3b42c

Browse files
Jun-Howie and qinxuye authored
FEAT: [model] Support Qwen3-VL (#4112)
Co-authored-by: qinxuye <[email protected]>
1 parent ebf3bf8 commit bc3b42c

File tree

5 files changed

+200
-8
lines changed

5 files changed

+200
-8
lines changed

xinference/model/llm/llm_family.json

Lines changed: 169 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21617,5 +21617,174 @@
2161721617
],
2161821618
"reasoning_start_tag": "<think>",
2161921619
"reasoning_end_tag": "</think>"
21620+
},
21621+
{
21622+
"version": 2,
21623+
"context_length": 262144,
21624+
"model_name": "Qwen3-VL-Instruct",
21625+
"model_lang": [
21626+
"en",
21627+
"zh"
21628+
],
21629+
"model_ability": [
21630+
"chat",
21631+
"vision",
21632+
"tools"
21633+
],
21634+
"model_description": "Meet Qwen3-VL — the most powerful vision-language model in the Qwen series to date.",
21635+
"model_specs": [
21636+
{
21637+
"model_format": "pytorch",
21638+
"model_size_in_billions": 235,
21639+
"activated_size_in_billions": 22,
21640+
"model_src": {
21641+
"huggingface": {
21642+
"quantizations": [
21643+
"none"
21644+
],
21645+
"model_id": "Qwen/Qwen3-VL-235B-A22B-Instruct"
21646+
},
21647+
"modelscope": {
21648+
"quantizations": [
21649+
"none"
21650+
],
21651+
"model_id": "Qwen/Qwen3-VL-235B-A22B-Instruct"
21652+
}
21653+
}
21654+
},
21655+
{
21656+
"model_format": "fp8",
21657+
"model_size_in_billions": 235,
21658+
"activated_size_in_billions": 22,
21659+
"model_src": {
21660+
"huggingface": {
21661+
"quantizations": [
21662+
"fp8"
21663+
],
21664+
"model_id": "QuantTrio/Qwen3-VL-235B-A22B-Instruct-FP8"
21665+
},
21666+
"modelscope": {
21667+
"quantizations": [
21668+
"fp8"
21669+
],
21670+
"model_id": "tclf90/Qwen3-VL-235B-A22B-Instruct-FP8"
21671+
}
21672+
}
21673+
},
21674+
{
21675+
"model_format": "awq",
21676+
"model_size_in_billions": 235,
21677+
"activated_size_in_billions": 22,
21678+
"model_src": {
21679+
"huggingface": {
21680+
"quantizations": [
21681+
"Int4"
21682+
],
21683+
"model_id": "QuantTrio/Qwen3-VL-235B-A22B-Instruct-AWQ"
21684+
},
21685+
"modelscope": {
21686+
"quantizations": [
21687+
"Int4"
21688+
],
21689+
"model_id": "tclf90/Qwen3-VL-235B-A22B-Instruct-AWQ"
21690+
}
21691+
}
21692+
}
21693+
],
21694+
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {%- if messages[0].content is string %}\n {{- messages[0].content }}\n {%- else %}\n {%- for content in messages[0].content %}\n {%- if 'text' in content %}\n {{- content.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].content is string %}\n {{- messages[0].content }}\n {%- else %}\n {%- for content in messages[0].content %}\n {%- if 'text' in content %}\n {{- content.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set image_count = namespace(value=0) %}\n{%- set video_count = namespace(value=0) %}\n{%- for message in messages %}\n {%- if message.role == \"user\" %}\n {{- '<|im_start|>' + message.role + '\\n' }}\n {%- if message.content is string %}\n {{- message.content }}\n {%- else %}\n {%- for content in message.content %}\n {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}\n {%- set image_count.value = image_count.value + 1 %}\n {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}\n <|vision_start|><|image_pad|><|vision_end|>\n {%- elif content.type == 'video' or 'video' in content %}\n {%- set video_count.value = video_count.value + 1 %}\n {%- if add_vision_id %}Video {{ 
video_count.value }}: {% endif -%}\n <|vision_start|><|video_pad|><|vision_end|>\n {%- elif 'text' in content %}\n {{- content.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role + '\\n' }}\n {%- if message.content is string %}\n {{- message.content }}\n {%- else %}\n {%- for content_item in message.content %}\n {%- if 'text' in content_item %}\n {{- content_item.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and message.content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {%- if message.content is string %}\n {{- message.content }}\n {%- else %}\n {%- for content in message.content %}\n {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}\n {%- set image_count.value = image_count.value + 1 %}\n {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}\n <|vision_start|><|image_pad|><|vision_end|>\n {%- elif content.type == 'video' or 'video' in content %}\n {%- set video_count.value = video_count.value + 1 %}\n {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}\n <|vision_start|><|video_pad|><|vision_end|>\n {%- elif 'text' in content %}\n {{- content.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- 
'\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
21695+
"stop_token_ids": [
21696+
151643,
21697+
151645
21698+
],
21699+
"stop": [
21700+
"<|endoftext|>",
21701+
"<|im_end|>"
21702+
]
21703+
},
21704+
{
21705+
"version": 2,
21706+
"context_length": 262144,
21707+
"model_name": "Qwen3-VL-Thinking",
21708+
"model_lang": [
21709+
"en",
21710+
"zh"
21711+
],
21712+
"model_ability": [
21713+
"chat",
21714+
"vision",
21715+
"reasoning",
21716+
"tools"
21717+
],
21718+
"model_description": "Meet Qwen3-VL — the most powerful vision-language model in the Qwen series to date.",
21719+
"model_specs": [
21720+
{
21721+
"model_format": "pytorch",
21722+
"model_size_in_billions": 235,
21723+
"activated_size_in_billions": 22,
21724+
"model_src": {
21725+
"huggingface": {
21726+
"quantizations": [
21727+
"none"
21728+
],
21729+
"model_id": "Qwen/Qwen3-VL-235B-A22B-Thinking"
21730+
},
21731+
"modelscope": {
21732+
"quantizations": [
21733+
"none"
21734+
],
21735+
"model_id": "Qwen/Qwen3-VL-235B-A22B-Thinking"
21736+
}
21737+
}
21738+
},
21739+
{
21740+
"model_format": "fp8",
21741+
"model_size_in_billions": 235,
21742+
"activated_size_in_billions": 22,
21743+
"model_src": {
21744+
"huggingface": {
21745+
"quantizations": [
21746+
"fp8"
21747+
],
21748+
"model_id": "QuantTrio/Qwen3-VL-235B-A22B-Thinking-FP8"
21749+
},
21750+
"modelscope": {
21751+
"quantizations": [
21752+
"fp8"
21753+
],
21754+
"model_id": "tclf90/Qwen3-VL-235B-A22B-Thinking-FP8"
21755+
}
21756+
}
21757+
},
21758+
{
21759+
"model_format": "awq",
21760+
"model_size_in_billions": 235,
21761+
"activated_size_in_billions": 22,
21762+
"model_src": {
21763+
"huggingface": {
21764+
"quantizations": [
21765+
"Int4"
21766+
],
21767+
"model_id": "QuantTrio/Qwen3-VL-235B-A22B-Thinking-AWQ"
21768+
},
21769+
"modelscope": {
21770+
"quantizations": [
21771+
"Int4"
21772+
],
21773+
"model_id": "tclf90/Qwen3-VL-235B-A22B-Thinking-AWQ"
21774+
}
21775+
}
21776+
}
21777+
],
21778+
"chat_template": "{%- set image_count = namespace(value=0) %}\n{%- set video_count = namespace(value=0) %}\n{%- macro render_content(content, do_vision_count) %}\n {%- if content is string %}\n {{- content }}\n {%- else %}\n {%- for item in content %}\n {%- if 'image' in item or 'image_url' in item or item.type == 'image' %}\n {%- if do_vision_count %}\n {%- set image_count.value = image_count.value + 1 %}\n {%- endif %}\n {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}\n <|vision_start|><|image_pad|><|vision_end|>\n {%- elif 'video' in item or item.type == 'video' %}\n {%- if do_vision_count %}\n {%- set video_count.value = video_count.value + 1 %}\n {%- endif %}\n {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}\n <|vision_start|><|video_pad|><|vision_end|>\n {%- elif 'text' in item %}\n {{- item.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n{%- endmacro %}\n{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- render_content(messages[0].content, false) + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + render_content(messages[0].content, false) + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if 
ns.multi_step_tool and message.role == \"user\" %}\n {%- set content = render_content(message.content, false) %}\n {%- if not(content.startswith('<tool_response>') and content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- set content = render_content(message.content, True) %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif 
%}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n<think>\\n' }}\n{%- endif %}\n",
21779+
"stop_token_ids": [
21780+
151643,
21781+
151645
21782+
],
21783+
"stop": [
21784+
"<|endoftext|>",
21785+
"<|im_end|>"
21786+
],
21787+
"reasoning_start_tag": "<think>",
21788+
"reasoning_end_tag": "</think>"
2162021789
}
2162121790
]

xinference/model/llm/transformers/multimodal/qwen2_vl.py

Lines changed: 24 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -27,11 +27,19 @@
2727

2828

2929
@register_batching_multimodal_models(
30-
"qwen2-vl-instruct", "qwen2.5-vl-instruct", "QvQ-72B-Preview"
30+
"qwen2-vl-instruct",
31+
"qwen2.5-vl-instruct",
32+
"QvQ-72B-Preview",
33+
"Qwen3-VL-Instruct",
34+
"Qwen3-VL-Thinking",
3135
)
3236
@register_transformer
3337
@register_non_default_model(
34-
"qwen2-vl-instruct", "qwen2.5-vl-instruct", "QvQ-72B-Preview"
38+
"qwen2-vl-instruct",
39+
"qwen2.5-vl-instruct",
40+
"QvQ-72B-Preview",
41+
"Qwen3-VL-Instruct",
42+
"Qwen3-VL-Thinking",
3543
)
3644
class Qwen2VLChatModel(PytorchMultiModalModel):
3745
def _sanitize_model_config(
@@ -47,7 +55,7 @@ def _sanitize_model_config(
4755
def match_json(
4856
cls, model_family: "LLMFamilyV2", model_spec: "LLMSpecV1", quantization: str
4957
) -> bool:
50-
if model_spec.model_format not in ["pytorch", "gptq", "awq", "bnb"]:
58+
if model_spec.model_format not in ["pytorch", "gptq", "awq", "bnb", "fp8"]:
5159
return False
5260
llm_family = model_family.model_family or model_family.model_name
5361
if "qwen2-vl-instruct".lower() in llm_family.lower():
@@ -56,6 +64,8 @@ def match_json(
5664
return True
5765
if "qvq-72b-preview".lower() in llm_family.lower():
5866
return True
67+
if "qwen3-vl" in llm_family.lower():
68+
return True
5969
return False
6070

6171
def decide_device(self):
@@ -85,13 +95,19 @@ def load_multimodal_model(self):
8595
except ImportError:
8696
Qwen2_5_VLForConditionalGeneration = None
8797

98+
try:
99+
from transformers import AutoModelForImageTextToText
100+
except ImportError:
101+
AutoModelForImageTextToText = None
102+
88103
kwargs = self.apply_bnb_quantization()
89104
llm_family = self.model_family.model_family or self.model_family.model_name
90-
model_cls = (
91-
Qwen2_5_VLForConditionalGeneration
92-
if "qwen2.5" in llm_family
93-
else Qwen2VLForConditionalGeneration
94-
)
105+
if "qwen2.5" in llm_family:
106+
model_cls = Qwen2_5_VLForConditionalGeneration
107+
elif "qwen3" in llm_family:
108+
model_cls = AutoModelForImageTextToText
109+
else:
110+
model_cls = Qwen2VLForConditionalGeneration
95111
if model_cls is None:
96112
raise ImportError("`transformers` version is too old, please upgrade it")
97113
device = "auto" if self._device == "cuda" else self._device

xinference/model/llm/utils.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,8 @@
7171
"Qwen3-Thinking",
7272
"Qwen3-Instruct",
7373
"Qwen3-Coder",
74+
"Qwen3-VL-Instruct",
75+
"Qwen3-VL-Thinking",
7476
]
7577

7678
GLM4_TOOL_CALL_FAMILY = [

xinference/model/llm/vllm/core.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -289,6 +289,10 @@ class VLLMGenerateConfig(TypedDict, total=False):
289289
if VLLM_INSTALLED and VLLM_VERSION >= version.parse("0.10.2"):
290290
VLLM_SUPPORTED_CHAT_MODELS.append("seed-oss")
291291

292+
if VLLM_INSTALLED and VLLM_VERSION > version.parse("0.10.2"):
293+
VLLM_SUPPORTED_VISION_MODEL_LIST.append("Qwen3-VL-Instruct")
294+
VLLM_SUPPORTED_VISION_MODEL_LIST.append("Qwen3-VL-Thinking")
295+
292296

293297
class VLLMModel(LLM):
294298
def __init__(

xinference/ui/web/ui/src/scenes/launch_model/data/data.js

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ export const additionalParameterTipList = {
4444
'pipeline_parallel_size',
4545
'enable_prefix_caching',
4646
'enable_chunked_prefill',
47+
'enable_expert_parallel',
4748
'enforce_eager',
4849
'cpu_offload_gb',
4950
'disable_custom_all_reduce',

0 commit comments

Comments
 (0)