Commit 781775e

Slow Test for Pipelines minor fixes (#6221)
update
1 parent fa3c86b commit 781775e

File tree: 6 files changed, 41 additions and 13 deletions

src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py

Lines changed: 3 additions & 3 deletions

@@ -283,6 +283,9 @@ def __call__(
                 f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}"
             )
 
+        # Offload all models
+        self.maybe_free_model_hooks()
+
         if output_type == "latent":
             return ShapEPipelineOutput(images=latents)
 

@@ -312,9 +315,6 @@ def __call__(
         if output_type == "pil":
             images = [self.numpy_to_pil(image) for image in images]
 
-        # Offload all models
-        self.maybe_free_model_hooks()
-
         if not return_dict:
             return (images,)
 
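Note: the offload call moves above the output_type == "latent" early return, so models hooked by CPU offload are also freed when only latents are requested. A minimal usage sketch under that assumption (the checkpoint name and dummy input are illustrative, not taken from this commit):

import torch
from PIL import Image
from diffusers import ShapEImg2ImgPipeline

# Illustrative checkpoint; any Shap-E img2img checkpoint behaves the same way here.
pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()  # installs the hooks that maybe_free_model_hooks() later releases

image = Image.new("RGB", (256, 256))  # placeholder input so the sketch is self-contained
# With the call moved before the early return, requesting latents no longer
# skips the end-of-call model offload.
latents = pipe(image, guidance_scale=3.0, num_inference_steps=64, output_type="latent").images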

src/diffusers/pipelines/unclip/pipeline_unclip.py

Lines changed: 2 additions & 1 deletion

@@ -477,8 +477,9 @@ def __call__(
             image = super_res_latents
         # done super res
 
-        # post processing
+        self.maybe_free_model_hooks()
 
+        # post processing
         image = image * 0.5 + 0.5
         image = image.clamp(0, 1)
         image = image.cpu().permute(0, 2, 3, 1).float().numpy()

src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py

Lines changed: 1 addition & 0 deletions

@@ -403,6 +403,7 @@ def __call__(
             image = super_res_latents
 
         # done super res
+        self.maybe_free_model_hooks()
 
         # post processing
 
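Both UnCLIP pipelines now release their offload hooks right after the super-resolution step, before post-processing. A hedged sketch of the user-facing flow (the Karlo checkpoint is the one commonly paired with UnCLIPPipeline; it is not something this commit specifies):

import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

# maybe_free_model_hooks() now runs after super-resolution, so the hooked models
# are offloaded back to the CPU before the numpy/PIL conversion.
image = pipe("a red panda wearing a blue hat").images[0]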

tests/pipelines/animatediff/test_animatediff.py

Lines changed: 30 additions & 1 deletion

@@ -14,7 +14,7 @@
     UNet2DConditionModel,
     UNetMotionModel,
 )
-from diffusers.utils import logging
+from diffusers.utils import is_xformers_available, logging
 from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS

@@ -233,6 +233,35 @@ def test_prompt_embeds(self):
         inputs["prompt_embeds"] = torch.randn((1, 4, 32), device=torch_device)
         pipe(**inputs)
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        for component in pipe.components.values():
+            if hasattr(component, "set_default_attn_processor"):
+                component.set_default_attn_processor()
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output_without_offload = pipe(**inputs).frames[0]
+        output_without_offload = (
+            output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
+        )
+
+        pipe.enable_xformers_memory_efficient_attention()
+        inputs = self.get_dummy_inputs(torch_device)
+        output_with_offload = pipe(**inputs).frames[0]
+        output_with_offload = (
+            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload
+        )
+
+        max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
+        self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")
+
 
 @slow
 @require_torch_gpu
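The new test toggles xFormers memory-efficient attention on the dummy pipeline and checks that outputs stay within 1e-4 of the default attention processors. A hedged sketch of the same toggle on a full AnimateDiff setup (model names are illustrative; the test itself uses dummy components):

import torch
from diffusers import AnimateDiffPipeline, MotionAdapter

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
pipe = AnimateDiffPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", motion_adapter=adapter, torch_dtype=torch.float16
).to("cuda")

# Mirrors the skip condition above: requires CUDA and the xformers package.
pipe.enable_xformers_memory_efficient_attention()
frames = pipe("a corgi running on the beach", num_frames=16, num_inference_steps=25).frames[0]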

tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py

Lines changed: 1 addition & 2 deletions

@@ -804,8 +804,7 @@ def test_stable_diffusion_adapter_zoedepth_sd_v15(self):
         pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
-        pipe.enable_attention_slicing()
-
+        pipe.enable_model_cpu_offload()
         generator = torch.Generator(device="cpu").manual_seed(0)
         out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
 
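The zoedepth slow test swaps attention slicing for model-level CPU offload. A rough sketch of the difference between the two memory strategies (the checkpoint names follow the usual T2I-Adapter setup and are an assumption, not read from the test file):

import torch
from diffusers import StableDiffusionAdapterPipeline, T2IAdapter

# Assumed checkpoints, for illustration only.
adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_zoedepth_sd15v1", torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", adapter=adapter, safety_checker=None, torch_dtype=torch.float16
)

# Old setup: everything stays on the GPU; attention is computed in slices to cut peak memory.
# pipe.to("cuda")
# pipe.enable_attention_slicing()

# New setup: sub-models live on the CPU and are moved to the GPU one at a time while they run.
pipe.enable_model_cpu_offload()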

tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py

Lines changed: 4 additions & 6 deletions

@@ -681,7 +681,7 @@ def test_canny_lora(self):
             variant="fp16",
         )
         pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors")
-        pipe.enable_sequential_cpu_offload()
+        pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
 
         generator = torch.Generator(device="cpu").manual_seed(0)

@@ -694,8 +694,6 @@ def test_canny_lora(self):
 
         assert images[0].shape == (768, 512, 3)
 
-        original_image = images[0, -3:, -3:, -1].flatten()
-        expected_image = np.array(
-            [0.50346327, 0.50708383, 0.50719553, 0.5135172, 0.5155377, 0.5066059, 0.49680984, 0.5005894, 0.48509413]
-        )
-        assert numpy_cosine_similarity_distance(original_image, expected_image) < 1e-4
+        image_slice = images[0, -3:, -3:, -1].flatten()
+        expected_slice = np.array([0.4284, 0.4337, 0.4319, 0.4255, 0.4329, 0.4280, 0.4338, 0.4420, 0.4226])
+        assert numpy_cosine_similarity_distance(image_slice, expected_slice) < 1e-4
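The expected slice is refreshed alongside the switch from sequential to model-level CPU offload; the comparison itself stays a cosine-distance check. A small, self-contained illustration of that check using numpy_cosine_similarity_distance from diffusers.utils.testing_utils (the perturbed slice is made up for demonstration):

import numpy as np
from diffusers.utils.testing_utils import numpy_cosine_similarity_distance

expected_slice = np.array([0.4284, 0.4337, 0.4319, 0.4255, 0.4329, 0.4280, 0.4338, 0.4420, 0.4226])
# A slice that differs from the reference only by tiny numerical noise.
image_slice = expected_slice + 1e-5
distance = numpy_cosine_similarity_distance(image_slice, expected_slice)
assert distance < 1e-4  # same tolerance as the updated test
print(distance)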
