1 parent 6c095b2 commit 8222dc9
pytorch_lightning/plugins/training_type/ddp_spawn.py
@@ -21,6 +21,7 @@
 import torch.multiprocessing as mp
 from torch.nn.parallel.distributed import DistributedDataParallel
 from torch.optim import Optimizer
+import numpy

 from pytorch_lightning.distributed.dist import LightningDistributed
 from pytorch_lightning.overrides import LightningDistributedModule
@@ -78,6 +79,7 @@ def distributed_sampler_kwargs(self):

     def setup(self, model):
         os.environ["MASTER_PORT"] = str(self.cluster_environment.master_port())
+        os.environ["MKL_SERVICE_FORCE_INTEL"] = "1"

         # pass in a state q
         smp = mp.get_context("spawn")
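The commit imports numpy and sets MKL_SERVICE_FORCE_INTEL=1 before the "spawn" multiprocessing context is created. The usual reason for this variable is mkl-service aborting spawned workers with an "MKL_THREADING_LAYER=INTEL is incompatible with libgomp.so.1" error; the commit itself does not state its motivation, so take that as the likely rationale rather than a confirmed one. Below is a minimal standalone sketch of the same workaround outside Lightning; the worker function and process count are illustrative, not part of the commit.

# Standalone sketch of the workaround (illustrative, not from the commit).
# Importing numpy early initializes mkl-service in the parent process;
# MKL_SERVICE_FORCE_INTEL=1 forces the Intel threading layer, and spawned
# children inherit the environment variable.
import os

import numpy  # noqa: F401  # imported early so mkl-service initializes here
import torch.multiprocessing as mp


def worker(rank):
    # Illustrative worker; real code would build the model/trainer here.
    print(f"worker {rank} running")


if __name__ == "__main__":
    os.environ["MKL_SERVICE_FORCE_INTEL"] = "1"
    mp.spawn(worker, nprocs=2)  # torch's spawn uses the "spawn" start method by default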