From 3c0579b1b70aeb3873f0002b8271eb69cd4bcced Mon Sep 17 00:00:00 2001
From: Sayak Paul
Date: Tue, 6 Jun 2023 22:12:00 +0530
Subject: [PATCH] feat: when using PT 2.0 use LoRAAttnProcessor2_0 for text
 enc LoRA.

---
 src/diffusers/loaders.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/diffusers/loaders.py b/src/diffusers/loaders.py
index 684a2ba710b9..6ecc701f83e8 100644
--- a/src/diffusers/loaders.py
+++ b/src/diffusers/loaders.py
@@ -1168,7 +1168,10 @@ def _load_text_encoder_attn_procs(
             cross_attention_dim = value_dict["to_k_lora.down.weight"].shape[1]
             hidden_size = value_dict["to_k_lora.up.weight"].shape[0]
 
-            attn_processors[key] = LoRAAttnProcessor(
+            attn_processor_class = (
+                LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
+            )
+            attn_processors[key] = attn_processor_class(
                 hidden_size=hidden_size,
                 cross_attention_dim=cross_attention_dim,
                 rank=rank,
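
Note (not part of the patch): a minimal sketch of the same feature-detection idea in isolation, assuming a diffusers release where LoRAAttnProcessor and LoRAAttnProcessor2_0 are importable from diffusers.models.attention_processor; the hidden_size/cross_attention_dim/rank values are illustrative only, not taken from the patch.

import torch.nn.functional as F

from diffusers.models.attention_processor import LoRAAttnProcessor, LoRAAttnProcessor2_0

# PyTorch 2.0 exposes the fused SDPA kernel as F.scaled_dot_product_attention;
# its presence is used to select the 2.0-optimized LoRA attention processor,
# with the generic implementation as the fallback on older PyTorch.
attn_processor_class = (
    LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
)

# Illustrative construction with assumed hyperparameters.
processor = attn_processor_class(hidden_size=768, cross_attention_dim=768, rank=4)
print(type(processor).__name__)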