
Commit 1e8d7d3

Add weight loading test for fairseq2
Signed-off-by: Martin Gleize <[email protected]>
1 parent 2441115

2 files changed: +9 −7

tests/weight_loading/models.txt (2 additions, 1 deletion)

@@ -30,4 +30,5 @@ marlin, nm-testing/zephyr-beta-7b-marlin-g128, main
 marlin, robertgshaw2/zephyr-7b-beta-channelwise-marlin, main
 qqq, HandH1998/QQQ-Llama-3-8b-g128, main
 qqq, HandH1998/QQQ-Llama-3-8b, main
-hqq, nm-testing/Llama-3.2-1B-Instruct-HQQ, main
+hqq, nm-testing/Llama-3.2-1B-Instruct-HQQ, main
+None, mgleize/fairseq2-dummy-Llama-3.2-1B, main
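Each models.txt row has the form "quantization, model, revision"; the new row uses the literal string None in the quantization column to mark the fairseq2 checkpoint as unquantized. A minimal illustrative sketch of splitting such a row into its three fields (the helper name is hypothetical; in vLLM the fields actually reach the test via environment variables set by a runner script):

# Hypothetical helper for illustration only; the real vLLM harness feeds
# these fields to the test through environment variables.
def parse_row(line: str) -> tuple[str, str, str]:
    # Split "quantization, model, revision" and drop surrounding whitespace.
    quantization, model_name, revision = (f.strip() for f in line.split(","))
    return quantization, model_name, revision

assert parse_row("None, mgleize/fairseq2-dummy-Llama-3.2-1B, main") == (
    "None", "mgleize/fairseq2-dummy-Llama-3.2-1B", "main")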

tests/weight_loading/test_weight_loading.py (7 additions, 6 deletions)

@@ -20,12 +20,13 @@ def test_weight_loading(vllm_runner):
     """
     Test parameter weight loading with tp>1.
     """
-    with vllm_runner(model_name=MODEL_NAME,
-                     revision=REVISION,
-                     dtype=torch.half if QUANTIZATION == "gptq" else "auto",
-                     quantization=QUANTIZATION,
-                     max_model_len=MAX_MODEL_LEN,
-                     tensor_parallel_size=2) as model:
+    with vllm_runner(
+            model_name=MODEL_NAME,
+            revision=REVISION,
+            dtype=torch.half if QUANTIZATION == "gptq" else "auto",
+            quantization=None if QUANTIZATION == "None" else QUANTIZATION,
+            max_model_len=MAX_MODEL_LEN,
+            tensor_parallel_size=2) as model:
 
         output = model.generate_greedy("Hello world!", max_tokens=20)
         print(output)
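The functional change is in the quantization argument: because the value originates as a string, a row whose first column is None would previously have been passed through as the literal string "None", which vLLM does not recognize as a quantization method. The new expression maps that sentinel string to a real Python None. A small sketch of the idea, assuming QUANTIZATION is read from the environment as in the test's module-level constants:

import os

# Assumption: QUANTIZATION arrives as a plain string, e.g. exported by the
# weight-loading runner script from a models.txt row.
QUANTIZATION = os.environ.get("QUANTIZATION", "None")

# Map the sentinel string "None" to Python's None so vLLM skips
# quantization; any real method name ("gptq", "marlin", ...) passes through.
quantization = None if QUANTIZATION == "None" else QUANTIZATION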
