diff --git a/tests/lora/test_lora_layers_sd.py b/tests/lora/test_lora_layers_sd.py
index 0aee4f57c2c6..0f606a056f67 100644
--- a/tests/lora/test_lora_layers_sd.py
+++ b/tests/lora/test_lora_layers_sd.py
@@ -157,11 +157,12 @@ def test_integration_move_lora_cpu(self):
             if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)):
                 self.assertTrue(m.weight.device != torch.device("cpu"))
 
+    @slow
     @require_torch_gpu
     def test_integration_move_lora_dora_cpu(self):
         from peft import LoraConfig
 
-        path = "runwayml/stable-diffusion-v1-5"
+        path = "Lykon/dreamshaper-8"
         unet_lora_config = LoraConfig(
             init_lora_weights="gaussian",
             target_modules=["to_k", "to_q", "to_v", "to_out.0"],
diff --git a/tests/models/autoencoders/test_models_vae.py b/tests/models/autoencoders/test_models_vae.py
index 38cbd788a95e..5a0d62e7c95f 100644
--- a/tests/models/autoencoders/test_models_vae.py
+++ b/tests/models/autoencoders/test_models_vae.py
@@ -528,6 +528,10 @@ def test_forward_signature(self):
     def test_forward_with_norm_groups(self):
         pass
 
+    @unittest.skip("No attention module used in this model")
+    def test_set_attn_processor_for_determinism(self):
+        return
+
 
 @slow
 class AutoencoderTinyIntegrationTests(unittest.TestCase):
diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py
index 2437a5a55cda..2ddf9d361d06 100644
--- a/tests/models/test_modeling_common.py
+++ b/tests/models/test_modeling_common.py
@@ -220,6 +220,7 @@ class ModelTesterMixin:
     base_precision = 1e-3
     forward_requires_fresh_args = False
     model_split_percents = [0.5, 0.7, 0.9]
+    uses_custom_attn_processor = False
 
     def check_device_map_is_respected(self, model, device_map):
         for param_name, param in model.named_parameters():
diff --git a/tests/models/transformers/test_models_transformer_cogvideox.py b/tests/models/transformers/test_models_transformer_cogvideox.py
index 83cdf87baa4f..6db4113cbd1b 100644
--- a/tests/models/transformers/test_models_transformer_cogvideox.py
+++ b/tests/models/transformers/test_models_transformer_cogvideox.py
@@ -32,6 +32,7 @@ class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
     model_class = CogVideoXTransformer3DModel
     main_input_name = "hidden_states"
+    uses_custom_attn_processor = True
 
     @property
     def dummy_input(self):
diff --git a/tests/models/transformers/test_models_transformer_lumina.py b/tests/models/transformers/test_models_transformer_lumina.py
index 0b3e666999e9..6744fb8ac84b 100644
--- a/tests/models/transformers/test_models_transformer_lumina.py
+++ b/tests/models/transformers/test_models_transformer_lumina.py
@@ -32,6 +32,7 @@ class LuminaNextDiT2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
     model_class = LuminaNextDiT2DModel
     main_input_name = "hidden_states"
+    uses_custom_attn_processor = True
 
     @property
     def dummy_input(self):
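
Note: the hunks above only introduce the `uses_custom_attn_processor` flag and set it
on the CogVideoX and Lumina test classes; the code that consumes the flag lives in
`ModelTesterMixin` and is not shown in this diff. As a minimal sketch of the presumable
intent (the test body below is an assumption for illustration, not copied from the PR),
the determinism test would early-return for models that ship their own attention
processors:

    # Hypothetical sketch of how ModelTesterMixin presumably consumes the new
    # flag; the actual test body in diffusers may differ.
    class ModelTesterMixin:
        uses_custom_attn_processor = False

        def test_set_attn_processor_for_determinism(self):
            # Models like CogVideoX and Lumina define bespoke attention
            # processors, so swapping in the generic AttnProcessor variants is
            # not meaningful; their test classes opt out by setting the flag.
            if self.uses_custom_attn_processor:
                return
            ...  # the usual determinism checks run for all other models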