1 file changed: +6 −0 lines changed
@@ -11,6 +11,7 @@
 from vllm.config import VllmConfig
 from vllm.distributed import (ensure_model_parallel_initialized,
                               init_distributed_environment)
+from vllm.distributed.parallel_state import get_pp_group
 from vllm.logger import init_logger
 from vllm.model_executor import set_random_seed
 from vllm.platforms import current_platform
@@ -176,3 +177,8 @@ def init_worker_distributed_environment(self) -> None:
             parallel_config.pipeline_parallel_size)
         # global all_reduce needed for overall oneccl warm up
         torch.distributed.all_reduce(torch.zeros(1).xpu())
+
+        if parallel_config.pipeline_parallel_size > 1:
+            # Add pp group init to avoid
+            # p2p communication as the first call
+            get_pp_group().all_reduce(torch.zeros(1).xpu())
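For context, the pattern this diff applies is: issue a cheap all_reduce on the global communicator (the existing oneCCL warm-up), then issue one on the pipeline-parallel group as well, so that the first operation that group's communicator handles is a collective rather than a point-to-point send/recv. Below is a minimal sketch of that pattern written against plain torch.distributed instead of vLLM's internals; the function name warm_up_communicators, the device argument, and the generic ProcessGroup handle are illustrative assumptions, not vLLM's API (vLLM itself goes through get_pp_group().all_reduce as shown above).

# Minimal sketch of the warm-up pattern from the diff, expressed with plain
# torch.distributed. Assumptions: the distributed environment is already
# initialized with an XPU/oneCCL-capable backend, and `pp_group` is the
# pipeline-parallel sub-group (in vLLM this would be get_pp_group()).
import torch
import torch.distributed as dist


def warm_up_communicators(pp_group: dist.ProcessGroup,
                          device: str = "xpu") -> None:
    # Global all_reduce warms up the world communicator.
    dist.all_reduce(torch.zeros(1, device=device))

    # Warm up the pipeline-parallel group with a collective as well, so the
    # later point-to-point transfer of hidden states between pipeline stages
    # is not the first call that communicator sees.
    if dist.get_world_size(group=pp_group) > 1:
        dist.all_reduce(torch.zeros(1, device=device), group=pp_group)

As the in-diff comment states, the point is simply to initialize the pipeline-parallel communicator with a collective so that a p2p communication is not its first call.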