diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
index 550513b5c943..e764fc687ca5 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
@@ -4,7 +4,8 @@
 
 import torch
 
-from tqdm.auto import tqdm
+from tqdm.contrib import tenumerate
+
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 
 from ...models import AutoencoderKL, UNet2DConditionModel
@@ -126,7 +127,7 @@ def __call__(
         if accepts_eta:
             extra_step_kwargs["eta"] = eta
 
-        for i, t in tqdm(enumerate(self.scheduler.timesteps)):
+        for i, t in tenumerate(self.scheduler.timesteps):
             # expand the latents if we are doing classifier free guidance
             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
             if isinstance(self.scheduler, LMSDiscreteScheduler):