1 parent 3d1cfbf commit e1d5402
vllm/worker/worker.py
@@ -48,6 +48,14 @@ def __init__(
         self.gpu_cache = None
 
     def init_model(self, cupy_port: Optional[int] = None):
+        # torch.distributed.all_reduce does not free the input tensor until
+        # the synchronization point. This causes the memory usage to grow
+        # as the number of all_reduce calls increases. This env var disables
+        # this behavior.
+        # Related issue:
+        # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
+        os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
+
         # This env var set by Ray causes exceptions with graph building.
         os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
         # Env vars will be set by Ray.
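
For context, the standalone sketch below is not part of the commit; it only illustrates the pattern the change relies on: TORCH_NCCL_AVOID_RECORD_STREAMS must be set in the environment before the process group is created and the first NCCL collective runs, otherwise existing communicators keep recording all_reduce inputs on internal streams and the allocator cannot reuse that memory until a later synchronization point. The run_worker helper, the single-node MASTER_ADDR/MASTER_PORT setup, and the loop sizes are hypothetical and chosen only for illustration.

import os

# Assumption: this must run before init_process_group and any collective call;
# setting the variable later has no effect on communicators that already exist.
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"

import torch
import torch.distributed as dist


def run_worker(rank: int, world_size: int) -> None:
    # Hypothetical single-node setup, for illustration only.
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)

    # Without TORCH_NCCL_AVOID_RECORD_STREAMS=1, each all_reduce input can stay
    # allocated until a later synchronization point, so allocating a fresh
    # tensor per call (as a model forward pass effectively does) grows memory
    # usage with the number of calls.
    for _ in range(100):
        x = torch.randn(1 << 20, device="cuda")
        dist.all_reduce(x)

    torch.cuda.synchronize()
    dist.destroy_process_group()


if __name__ == "__main__":
    world_size = torch.cuda.device_count()
    torch.multiprocessing.spawn(run_worker, args=(world_size,), nprocs=world_size)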