From 57d7e7a2fbe161db3dc9fe7f3e408a98c78b1f0f Mon Sep 17 00:00:00 2001
From: yiyixuxu
Date: Fri, 24 Feb 2023 00:30:57 +0000
Subject: [PATCH] add attn_res variable

---
 .../pipeline_stable_diffusion_attend_and_excite.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_attend_and_excite.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_attend_and_excite.py
index 8e6330c9a983..662bbf9a362f 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_attend_and_excite.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_attend_and_excite.py
@@ -691,6 +691,7 @@ def __call__(
         max_iter_to_alter: int = 25,
         thresholds: dict = {0: 0.05, 10: 0.5, 20: 0.8},
         scale_factor: int = 20,
+        attn_res: int = 16,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -762,6 +763,8 @@ def __call__(
                 Dictionary defining the iterations and desired thresholds to apply iterative latent refinement in.
             scale_factor (`int`, *optional*, default to 20):
                 Scale factor that controls the step size of each Attend and Excite update.
+            attn_res (`int`, *optional*, default to 16):
+                The resolution of the semantic attention map.

         Examples:

@@ -834,7 +837,7 @@ def __call__(
         # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

-        self.attention_store = AttentionStore()
+        self.attention_store = AttentionStore(attn_res=attn_res)
         self.register_attention_control()

         # default config for step size from original repo
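
For reviewers, a minimal usage sketch of where the new `attn_res` argument enters a pipeline call. The checkpoint id and `token_indices` values are illustrative only; everything except `attn_res` already existed before this patch.

import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

# Illustrative checkpoint; any Stable Diffusion 1.x checkpoint should work.
pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")  # assumes a CUDA device is available

image = pipe(
    prompt="a cat and a frog",
    token_indices=[2, 5],  # illustrative: token positions of "cat" and "frog"
    max_iter_to_alter=25,
    scale_factor=20,
    attn_res=16,  # new in this patch: resolution of the stored attention maps
).images[0]

With the default of 16, behavior is unchanged from before the patch, since AttentionStore previously hard-coded that resolution.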