We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent fe6faed · commit ded8840 — Copy full SHA for ded8840
vllm/distributed/parallel_state.py
@@ -1183,6 +1183,11 @@ def cleanup_dist_env_and_memory(shutdown_ray: bool = False):
1183
from vllm.platforms import current_platform
1184
if not current_platform.is_cpu():
1185
torch.cuda.empty_cache()
1186
+ try:
1187
+ torch._C._host_emptyCache()
1188
+ except AttributeError:
1189
+ logger.warning(
1190
+ "torch._C._host_emptyCache() only available in Pytorch >=2.5")
1191
1192
1193
def in_the_same_node_as(pg: Union[ProcessGroup, StatelessProcessGroup],
0 commit comments