Commit 5d0befa

Author: Youssef Adarrab (committed)
Fix flake8 C419 unnecessary list comprehension code quality error
Parent: c56cd09

File tree

2 files changed: +2 -2 lines changed

src/diffusers/loaders.py

Lines changed: 1 addition & 1 deletion
@@ -796,7 +796,7 @@ def _modify_text_encoder(self, attn_processors: Dict[str, LoRAAttnProcessor]):
         """
         # Loop over the original attention modules.
         for name, _ in self.text_encoder.named_modules():
-            if any([x in name for x in TEXT_ENCODER_TARGET_MODULES]):
+            if any((x in name for x in TEXT_ENCODER_TARGET_MODULES)):
                 # Retrieve the module and its corresponding LoRA processor.
                 module = self.text_encoder.get_submodule(name)
                 # Construct a new function that performs the LoRA merging. We will monkey patch

tests/models/test_lora_layers.py

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ def create_unet_lora_layers(unet: nn.Module):
 def create_text_encoder_lora_layers(text_encoder: nn.Module):
     text_lora_attn_procs = {}
     for name, module in text_encoder.named_modules():
-        if any([x in name for x in TEXT_ENCODER_TARGET_MODULES]):
+        if any((x in name for x in TEXT_ENCODER_TARGET_MODULES)):
             text_lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=module.out_features, cross_attention_dim=None)
     text_encoder_lora_layers = AttnProcsLayers(text_lora_attn_procs)
     return text_encoder_lora_layers
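
Note: flake8's C419 check (from the flake8-comprehensions plugin) flags a list comprehension passed to any()/all(), because building the full list prevents short-circuiting on the first match. Below is a minimal standalone sketch of the difference; TARGET_MODULES and the sample name are hypothetical stand-ins for TEXT_ENCODER_TARGET_MODULES and a real submodule name, and the commit's extra parentheses around the generator are equivalent to the bare generator expression shown here.

# Hypothetical stand-in for TEXT_ENCODER_TARGET_MODULES.
TARGET_MODULES = ["q_proj", "k_proj", "v_proj", "out_proj"]

# Hypothetical submodule name, as produced by named_modules().
name = "text_model.encoder.layers.0.self_attn.q_proj"

# Flagged by C419: the list comprehension materializes every element
# before any() runs, so any() cannot stop at the first match.
flagged = any([x in name for x in TARGET_MODULES])

# Preferred: a generator expression lets any() return as soon as one
# element is truthy, without allocating an intermediate list.
preferred = any(x in name for x in TARGET_MODULES)

assert flagged == preferred  # same result, different evaluation cost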
