@@ -20,7 +20,7 @@
 import torch

 from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
-from diffusers.utils import load_numpy, slow
+from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu

 from ...pipeline_params import (
@@ -97,7 +97,14 @@ def test_inference(self):
         self.assertLessEqual(max_diff, 1e-3)

     def test_inference_batch_single_identical(self):
-        self._test_inference_batch_single_identical(relax_max_difference=True)
+        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
+
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


 @require_torch_gpu
@@ -123,7 +130,7 @@ def test_dit_256(self):
             expected_image = load_numpy(
                 f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
             )
-            assert np.abs((expected_image - image).max()) < 1e-3
+            assert np.abs((expected_image - image).max()) < 1e-2

     def test_dit_512(self):
         pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")