
Commit 09d0546

cpu offloading: multi GPU support (#1143)

1 parent 65d136e

1 file changed (+2, -2)

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py

Lines changed: 2 additions & 2 deletions
@@ -178,7 +178,7 @@ def disable_attention_slicing(self):
         # set slice_size = `None` to disable `attention slicing`
         self.enable_attention_slicing(None)
 
-    def enable_sequential_cpu_offload(self):
+    def enable_sequential_cpu_offload(self, gpu_id=0):
         r"""
         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
@@ -189,7 +189,7 @@ def enable_sequential_cpu_offload(self):
         else:
             raise ImportError("Please install accelerate via `pip install accelerate`")
 
-        device = torch.device("cuda")
+        device = torch.device(f"cuda:{gpu_id}")
 
         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
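
For context, a minimal usage sketch of the changed API. The model id, prompt, and two-GPU setup below are illustrative assumptions, not part of the commit; the only new surface introduced here is the gpu_id parameter:

    # Usage sketch: requires diffusers at this commit plus accelerate
    # (`pip install accelerate`), and at least two visible CUDA devices.
    import torch

    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",  # illustrative checkpoint
        torch_dtype=torch.float16,
    )

    # New in this commit: gpu_id selects which CUDA device the offloaded
    # modules are paged onto (previously hard-coded to "cuda", i.e. cuda:0).
    pipe.enable_sequential_cpu_offload(gpu_id=1)

    # No pipe.to("cuda") needed: each submodule is moved to cuda:1 only
    # while it executes, then returned to CPU to free GPU memory.
    image = pipe("an astronaut riding a horse").images[0]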
