15
15
import sys
16
16
import unittest
17
17
18
+ import torch
18
19
from transformers import AutoTokenizer , T5EncoderModel
19
20
20
21
from diffusers import (
21
22
AuraFlowPipeline ,
23
+ AuraFlowTransformer2DModel ,
22
24
FlowMatchEulerDiscreteScheduler ,
23
25
)
24
- from diffusers .utils .testing_utils import is_peft_available , require_peft_backend
26
+ from diffusers .utils .testing_utils import (
27
+ floats_tensor ,
28
+ is_peft_available ,
29
+ require_peft_backend ,
30
+ )
25
31
26
32
27
33
if is_peft_available ():
@@ -49,8 +55,9 @@ class AuraFlowLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
49
55
"joint_attention_dim" : 32 ,
50
56
"caption_projection_dim" : 32 ,
51
57
"out_channels" : 4 ,
52
- "pos_embed_max_size" : 32 ,
58
+ "pos_embed_max_size" : 64 ,
53
59
}
60
+ transformer_cls = AuraFlowTransformer2DModel
54
61
tokenizer_cls , tokenizer_id = AutoTokenizer , "hf-internal-testing/tiny-random-t5"
55
62
text_encoder_cls , text_encoder_id = T5EncoderModel , "hf-internal-testing/tiny-random-t5"
56
63
@@ -71,3 +78,26 @@ class AuraFlowLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
71
78
@property
def output_shape(self):
    """Expected output shape for the test pipeline: (batch, height, width, channels).

    NOTE(review): channel-last layout is implied by the trailing 3 — consistent
    with `output_type="np"` pipelines, but confirm against the mixin's usage.
    """
    return 1, 64, 64, 3
81
+
82
def get_dummy_inputs(self, with_generator=True):
    """Build deterministic dummy inputs for the LoRA pipeline tests.

    Returns a ``(noise, input_ids, pipeline_inputs)`` triple:
    a (1, 4, 32, 32) float tensor, a (1, 10) tensor of token ids in
    [1, 10), and the keyword arguments for a tiny 8x8 pipeline call.

    Args:
        with_generator: when True, the seeded generator is also placed
            into ``pipeline_inputs`` under the ``"generator"`` key so the
            pipeline run itself is reproducible.
    """
    # Seed first, then draw noise, then ids — the RNG call order is part
    # of the fixture's determinism and must not be reordered.
    seeded = torch.manual_seed(0)
    noise = floats_tensor((1, 4, 32, 32))
    input_ids = torch.randint(1, 10, size=(1, 10), generator=seeded)

    pipeline_inputs = {
        "prompt": "A painting of a squirrel eating a burger",
        "num_inference_steps": 4,
        "guidance_scale": 0.0,
        "height": 8,
        "width": 8,
        "output_type": "np",
    }
    if with_generator:
        pipeline_inputs["generator"] = seeded

    return noise, input_ids, pipeline_inputs
0 commit comments