Skip to content

Commit ef68eb2

Browse files
[Bug] Fix pickling of ModelConfig when RunAI Model Streamer is used (#11825)
Signed-off-by: DarkLight1337 <[email protected]>
1 parent 259abd8 commit ef68eb2

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

vllm/config.py

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -381,16 +381,16 @@ def maybe_pull_model_tokenizer_for_s3(self, model: str,
381381
"""
382382
if is_s3(model) or is_s3(tokenizer):
383383
if is_s3(model):
384-
self.s3_model = S3Model()
385-
self.s3_model.pull_files(model, allow_pattern=["*config.json"])
384+
s3_model = S3Model()
385+
s3_model.pull_files(model, allow_pattern=["*config.json"])
386386
self.model_weights = self.model
387-
self.model = self.s3_model.dir
387+
self.model = s3_model.dir
388388

389389
if is_s3(tokenizer):
390-
self.s3_tokenizer = S3Model()
391-
self.s3_tokenizer.pull_files(
390+
s3_tokenizer = S3Model()
391+
s3_tokenizer.pull_files(
392392
model, ignore_pattern=["*.pt", "*.safetensors", "*.bin"])
393-
self.tokenizer = self.s3_tokenizer.dir
393+
self.tokenizer = s3_tokenizer.dir
394394

395395
def _init_multimodal_config(
396396
self, limit_mm_per_prompt: Optional[Mapping[str, int]]

0 commit comments

Comments (0)