1 parent 180ee64 commit c33dfa1
vllm/v1/executor/gpu_executor.py
@@ -1,7 +1,7 @@
 import os
 from typing import Optional, Tuple
 
-from vllm.config import EngineConfig
+from vllm.config import VllmConfig
 from vllm.logger import init_logger
 from vllm.utils import get_distributed_init_method, get_ip, get_open_port
 from vllm.v1.outputs import ModelRunnerOutput
@@ -12,7 +12,8 @@
 
 class GPUExecutor:
 
-    def __init__(self, vllm_config: EngineConfig) -> None:
+    def __init__(self, vllm_config: VllmConfig) -> None:
+        self.vllm_config = vllm_config
         self.model_config = vllm_config.model_config
         self.cache_config = vllm_config.cache_config
         self.lora_config = vllm_config.lora_config
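
For readability, here is a minimal sketch of what the updated constructor looks like once both hunks are applied. It is reconstructed only from the lines visible in the diff above, not from the full vllm/v1/executor/gpu_executor.py source, so any surrounding code is omitted.

# Sketch only: reconstructed from the diff above, not the complete vLLM source.
from vllm.config import VllmConfig


class GPUExecutor:

    def __init__(self, vllm_config: VllmConfig) -> None:
        # Keep the full VllmConfig, then unpack the sub-configs this
        # executor uses; the old EngineConfig type is no longer referenced.
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config
        self.lora_config = vllm_config.lora_config

Storing the whole vllm_config on the executor, rather than only the unpacked sub-configs, lets later code reach any other part of the config without widening the constructor signature.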