diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml
index 26d3dbcf4e83..fc101347a6e9 100644
--- a/docs/source/en/_toctree.yml
+++ b/docs/source/en/_toctree.yml
@@ -203,6 +203,8 @@
title: Text-to-Image Generation with ControlNet Conditioning
- local: api/pipelines/stable_diffusion/model_editing
title: Text-to-Image Model Editing
+ - local: api/pipelines/stable_diffusion/diffedit
+ title: DiffEdit
title: Stable Diffusion
- local: api/pipelines/stable_diffusion_2
title: Stable Diffusion 2
diff --git a/docs/source/en/api/logging.mdx b/docs/source/en/api/logging.mdx
index b52c0434f42d..bb973db781ea 100644
--- a/docs/source/en/api/logging.mdx
+++ b/docs/source/en/api/logging.mdx
@@ -61,7 +61,7 @@ verbose to the most verbose), those levels (with their corresponding int values
critical errors.
- `diffusers.logging.ERROR` (int value, 40): only report errors.
- `diffusers.logging.WARNING` or `diffusers.logging.WARN` (int value, 30): only reports error and
- warnings. This the default level used by the library.
+ warnings. This is the default level used by the library.
- `diffusers.logging.INFO` (int value, 20): reports error, warnings and basic information.
- `diffusers.logging.DEBUG` (int value, 10): report all information.
diff --git a/docs/source/en/api/pipelines/stable_diffusion/diffedit.mdx b/docs/source/en/api/pipelines/stable_diffusion/diffedit.mdx
new file mode 100644
index 000000000000..a7cd906e0e77
--- /dev/null
+++ b/docs/source/en/api/pipelines/stable_diffusion/diffedit.mdx
@@ -0,0 +1,360 @@
+
+
+# Zero-shot Diffusion-based Semantic Image Editing with Mask Guidance
+
+## Overview
+
+[DiffEdit: Diffusion-based semantic image editing with mask guidance](https://arxiv.org/abs/2210.11427) by Guillaume Couairon, Jakob Verbeek, Holger Schwenk, and Matthieu Cord.
+
+The abstract of the paper is the following:
+
+*Image generation has recently seen tremendous advances, with diffusion models allowing to synthesize convincing images for a large variety of text prompts. In this article, we propose DiffEdit, a method to take advantage of text-conditioned diffusion models for the task of semantic image editing, where the goal is to edit an image based on a text query. Semantic image editing is an extension of image generation, with the additional constraint that the generated image should be as similar as possible to a given input image. Current editing methods based on diffusion models usually require to provide a mask, making the task much easier by treating it as a conditional inpainting task. In contrast, our main contribution is able to automatically generate a mask highlighting regions of the input image that need to be edited, by contrasting predictions of a diffusion model conditioned on different text prompts. Moreover, we rely on latent inference to preserve content in those regions of interest and show excellent synergies with mask-based diffusion. DiffEdit achieves state-of-the-art editing performance on ImageNet. In addition, we evaluate semantic image editing in more challenging settings, using images from the COCO dataset as well as text-based generated images.*
+
+Resources:
+
+* [Paper](https://arxiv.org/abs/2210.11427).
+* [Blog Post with Demo](https://blog.problemsolversguild.com/technical/research/2022/11/02/DiffEdit-Implementation.html).
+* [Implementation on Github](https://github.com/Xiang-cd/DiffEdit-stable-diffusion/).
+
+## Tips
+
+* The pipeline can generate masks that can be fed into other inpainting pipelines. Check out the code examples below to learn more.
+* In order to generate an image using this pipeline, both an image mask (manually specified or generated using `generate_mask`)
+and a set of partially inverted latents (generated using `invert`) _must_ be provided as arguments when calling the pipeline to generate the final edited image.
+Refer to the code examples below for more details.
+* The function `generate_mask` exposes two prompt arguments, `source_prompt` and `target_prompt`,
+that let you control the locations of the semantic edits in the final image to be generated. Let's say
+you want to translate from "cat" to "dog". In this case, the edit direction is "cat -> dog". To reflect
+this in the generated mask, you simply have to pass the phrase containing "cat" as `source_prompt` (or its
+embeddings as `source_prompt_embeds`) and the phrase containing "dog" as `target_prompt` (or `target_prompt_embeds`). Refer to the code example below for more details.
+* When generating partially inverted latents using `invert`, assign a caption or text embedding describing the
+overall image to the `prompt` argument to help guide the inverse latent sampling process. In most cases, the
+source concept is sufficiently descriptive to yield good results, but feel free to explore alternatives.
+Please refer to [this code example](#generating-image-captions-for-inversion) for more details.
+* When calling the pipeline to generate the final edited image, assign the source concept to `negative_prompt`
+and the target concept to `prompt`. Taking the above example, you simply pass the phrase containing "cat" as
+`negative_prompt` (or its embeddings as `negative_prompt_embeds`) and "dog" as `prompt` (or `prompt_embeds`).
+Refer to the code example below for more details.
+* If you want to reverse the direction in the example above, i.e., "dog -> cat" (a short sketch follows this list), then it's recommended to:
+ * Swap the `source_prompt` and `target_prompt` in the arguments to `generate_mask`.
+ * Change the input prompt for `invert` to include "dog".
+ * Swap the `prompt` and `negative_prompt` in the arguments to call the pipeline to generate the final edited image.
+* Note that the source and target prompts, or their corresponding embeddings, can also be automatically generated. Please refer to [this discussion](#generating-source-and-target-embeddings) for more details.
+
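+For instance, a minimal sketch of the reversed "dog -> cat" direction could look like the following. It assumes a
+`pipeline` and `generator` set up as in the usage example below and a `raw_image` that actually shows a dog; the
+prompts are purely illustrative.
+
+```py
+source_prompt = "a photo of a dog"  # the source concept after reversing the direction
+target_prompt = "a photo of a cat"  # the target concept after reversing the direction
+
+# swap the source and target prompts when generating the mask
+mask_image = pipeline.generate_mask(
+    image=raw_image,
+    source_prompt=source_prompt,
+    target_prompt=target_prompt,
+    generator=generator,
+)
+
+# the caption passed to `invert` now describes the original ("dog") image
+inv_latents = pipeline.invert(prompt=source_prompt, image=raw_image, generator=generator).latents
+
+# swap `prompt` and `negative_prompt` when generating the final edited image
+image = pipeline(
+    prompt=target_prompt,
+    negative_prompt=source_prompt,
+    mask_image=mask_image,
+    image_latents=inv_latents,
+    generator=generator,
+).images[0]
+```
+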
+## Available Pipelines:
+
+| Pipeline | Tasks |
+|---|---|
+| [StableDiffusionDiffEditPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py) | *Text-Based Image Editing* |
+
+
+
+## Usage example
+
+### Based on an input image with a caption
+
+When the pipeline is conditioned on an input image, we first obtain partially inverted latents from the input image using a
+`DDIMInverseScheduler` with the help of a caption. Then we generate an editing mask to identify relevant regions in the image using the source and target prompts. Finally,
+the inverted noise and generated mask are used to start the generation process.
+
+First, let's load our pipeline:
+
+```py
+import torch
+from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionDiffEditPipeline
+
+sd_model_ckpt = "stabilityai/stable-diffusion-2-1"
+pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
+ sd_model_ckpt,
+ torch_dtype=torch.float16,
+ safety_checker=None,
+)
+pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
+pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
+pipeline.enable_model_cpu_offload()
+pipeline.enable_vae_slicing()
+generator = torch.manual_seed(0)
+```
+
+Then, we load an input image to edit using our method:
+
+```py
+from diffusers.utils import load_image
+
+img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
+raw_image = load_image(img_url).convert("RGB").resize((768, 768))
+```
+
+Then, we employ the source and target prompts to generate the editing mask:
+
+```py
+# See the "Generating source and target embeddings" section below to
+# automate the generation of these captions with a pre-trained model like Flan-T5 as explained below.
+
+source_prompt = "a bowl of fruits"
+target_prompt = "a basket of fruits"
+mask_image = pipeline.generate_mask(
+ image=raw_image,
+ source_prompt=source_prompt,
+ target_prompt=target_prompt,
+ generator=generator,
+)
+```
+
+Then, we employ the source prompt as a caption, along with the input image, to obtain the partially inverted latents:
+
+```py
+inv_latents = pipeline.invert(prompt=source_prompt, image=raw_image, generator=generator).latents
+```
+
+Now, generate the image with the inverted latents and semantically generated mask:
+
+```py
+image = pipeline(
+ prompt=target_prompt,
+ mask_image=mask_image,
+ image_latents=inv_latents,
+ generator=generator,
+ negative_prompt=source_prompt,
+).images[0]
+image.save("edited_image.png")
+```
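+
+As mentioned in the tips above, the generated mask can also be reused with a regular inpainting pipeline instead of
+the DiffEdit generation step. The sketch below is one way to do this; it assumes that `generate_mask` returned the mask
+as a single-channel NumPy array at latent resolution (the default `output_type="np"`), and the inpainting checkpoint is
+only an illustrative choice.
+
+```py
+import numpy as np
+import torch
+from PIL import Image
+
+from diffusers import StableDiffusionInpaintPipeline
+
+# illustrative checkpoint; any Stable Diffusion inpainting checkpoint should work here
+inpaint_pipeline = StableDiffusionInpaintPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
+)
+inpaint_pipeline.enable_model_cpu_offload()
+
+# upscale the latent-resolution mask to the input image size and convert it to a PIL image
+pil_mask = Image.fromarray((mask_image.squeeze() * 255).astype(np.uint8), mode="L").resize(raw_image.size)
+
+inpainted_image = inpaint_pipeline(
+    prompt=target_prompt,
+    image=raw_image,
+    mask_image=pil_mask,
+    generator=generator,
+).images[0]
+inpainted_image.save("inpainted_image.png")
+```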
+
+## Generating image captions for inversion
+
+The authors originally used the source concept prompt as the caption for generating the partially inverted latents. However, we can also leverage open source and public image captioning models for the same purpose.
+Below, we provide an end-to-end example with the [BLIP](https://huggingface.co/docs/transformers/model_doc/blip) model
+for generating captions.
+
+First, let's load our automatic image captioning model:
+
+```py
+import torch
+from transformers import BlipForConditionalGeneration, BlipProcessor
+
+captioner_id = "Salesforce/blip-image-captioning-base"
+processor = BlipProcessor.from_pretrained(captioner_id)
+model = BlipForConditionalGeneration.from_pretrained(captioner_id, torch_dtype=torch.float16, low_cpu_mem_usage=True)
+```
+
+Then, we define a utility to generate captions from an input image using the model:
+
+```py
+@torch.no_grad()
+def generate_caption(images, caption_generator, caption_processor):
+ text = "a photograph of"
+
+ inputs = caption_processor(images, text, return_tensors="pt").to(device="cuda", dtype=caption_generator.dtype)
+ caption_generator.to("cuda")
+ outputs = caption_generator.generate(**inputs, max_new_tokens=128)
+
+ # offload caption generator
+ caption_generator.to("cpu")
+
+ caption = caption_processor.batch_decode(outputs, skip_special_tokens=True)[0]
+ return caption
+```
+
+Then, we load an input image for conditioning and obtain a suitable caption for it:
+
+```py
+from diffusers.utils import load_image
+
+img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
+raw_image = load_image(img_url).convert("RGB").resize((768, 768))
+caption = generate_caption(raw_image, model, processor)
+```
+
+Then, we employ the generated caption and the input image to get the inverted latents:
+
+```py
+from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
+
+pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
+)
+pipeline = pipeline.to("cuda")
+pipeline.enable_model_cpu_offload()
+pipeline.enable_vae_slicing()
+
+pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
+pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
+
+generator = torch.manual_seed(0)
+inv_latents = pipeline.invert(prompt=caption, image=raw_image, generator=generator).latents
+```
+
+Now, generate the image with the inverted latents and semantically generated mask from our source and target prompts:
+
+```py
+source_prompt = "a bowl of fruits"
+target_prompt = "a basket of fruits"
+
+mask_image = pipeline.generate_mask(
+ image=raw_image,
+ source_prompt=source_prompt,
+ target_prompt=target_prompt,
+ generator=generator,
+)
+
+image = pipeline(
+ prompt=target_prompt,
+ mask_image=mask_image,
+ image_latents=inv_latents,
+ generator=generator,
+ negative_prompt=source_prompt,
+).images[0]
+image.save("edited_image.png")
+```
+
+## Generating source and target embeddings
+
+The authors originally required the user to manually provide the source and target prompts for discovering
+edit directions. However, we can also leverage open source and public models for the same purpose.
+Below, we provide an end-to-end example with the [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) model
+for generating source and target embeddings.
+
+**1. Load the generation model**:
+
+```py
+import torch
+from transformers import AutoTokenizer, T5ForConditionalGeneration
+
+tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl")
+model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto", torch_dtype=torch.float16)
+```
+
+**2. Construct a starting prompt**:
+
+```py
+source_concept = "bowl"
+target_concept = "basket"
+
+source_text = f"Provide a caption for images containing a {source_concept}. "
+"The captions should be in English and should be no longer than 150 characters."
+
+target_text = f"Provide a caption for images containing a {target_concept}. "
+"The captions should be in English and should be no longer than 150 characters."
+```
+
+Here, we're interested in the "bowl -> basket" direction.
+
+**3. Generate prompts**:
+
+We can use a utility function like the one below for this purpose.
+
+```py
+@torch.no_grad()
+def generate_prompts(input_prompt):
+ input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids.to("cuda")
+
+ outputs = model.generate(
+ input_ids, temperature=0.8, num_return_sequences=16, do_sample=True, max_new_tokens=128, top_k=10
+ )
+ return tokenizer.batch_decode(outputs, skip_special_tokens=True)
+```
+
+And then we just call it to generate our prompts:
+
+```py
+source_prompts = generate_prompts(source_text)
+target_prompts = generate_prompts(target_text)
+```
+
+We encourage you to play around with the different parameters supported by the
+`generate()` method ([documentation](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationMixin.generate)) to get the generation quality you are looking for.
+
+**4. Load the embedding model**:
+
+Here, we need to use the same text encoder model used by the subsequent Stable Diffusion model.
+
+```py
+from diffusers import StableDiffusionDiffEditPipeline
+
+pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
+)
+pipeline = pipeline.to("cuda")
+pipeline.enable_model_cpu_offload()
+pipeline.enable_vae_slicing()
+
+generator = torch.manual_seed(0)
+```
+
+**5. Compute embeddings**:
+
+```py
+import torch
+
+@torch.no_grad()
+def embed_prompts(sentences, tokenizer, text_encoder, device="cuda"):
+ embeddings = []
+ for sent in sentences:
+ text_inputs = tokenizer(
+ sent,
+ padding="max_length",
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ text_input_ids = text_inputs.input_ids
+ prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=None)[0]
+ embeddings.append(prompt_embeds)
+ return torch.concatenate(embeddings, dim=0).mean(dim=0).unsqueeze(0)
+
+source_embeddings = embed_prompts(source_prompts, pipeline.tokenizer, pipeline.text_encoder)
+target_embeddings = embed_prompts(target_prompts, pipeline.tokenizer, pipeline.text_encoder)
+```
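+
+As a quick optional sanity check, both pooled embeddings should have the shape the pipeline expects,
+i.e. `(1, sequence_length, hidden_size)`. For the `stabilityai/stable-diffusion-2-1` checkpoint loaded above, that is
+`(1, 77, 1024)`:
+
+```py
+# both should print torch.Size([1, 77, 1024]) for the Stable Diffusion 2.1 text encoder
+print(source_embeddings.shape)
+print(target_embeddings.shape)
+```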
+
+And you're done! Now, you can use these embeddings directly while calling the pipeline:
+
+```py
+from diffusers import DDIMInverseScheduler, DDIMScheduler
+from diffusers.utils import load_image
+
+pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
+pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
+
+img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
+raw_image = load_image(img_url).convert("RGB").resize((768, 768))
+
+
+mask_image = pipeline.generate_mask(
+ image=raw_image,
+    source_prompt_embeds=source_embeddings,
+    target_prompt_embeds=target_embeddings,
+ generator=generator,
+)
+
+inv_latents = pipeline.invert(
+    prompt_embeds=source_embeddings,
+ image=raw_image,
+ generator=generator,
+).latents
+
+images = pipeline(
+ mask_image=mask_image,
+ image_latents=inv_latents,
+ prompt_embeds=target_embeddings,
+ negative_prompt_embeds=source_embeddings,
+ generator=generator,
+).images
+images[0].save("edited_image.png")
+```
+
+## StableDiffusionDiffEditPipeline
+[[autodoc]] StableDiffusionDiffEditPipeline
+ - all
+ - generate_mask
+ - invert
+ - __call__
\ No newline at end of file
diff --git a/docs/source/en/api/pipelines/stable_diffusion/overview.mdx b/docs/source/en/api/pipelines/stable_diffusion/overview.mdx
index 70731fd294b9..a163b57f2a84 100644
--- a/docs/source/en/api/pipelines/stable_diffusion/overview.mdx
+++ b/docs/source/en/api/pipelines/stable_diffusion/overview.mdx
@@ -36,6 +36,7 @@ For more details about how Stable Diffusion works and how it differs from the ba
| [StableDiffusionAttendAndExcitePipeline](./attend_and_excite) | **Experimental** – *Text-to-Image Generation * | | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://huggingface.co/spaces/AttendAndExcite/Attend-and-Excite)
| [StableDiffusionPix2PixZeroPipeline](./pix2pix_zero) | **Experimental** – *Text-Based Image Editing * | | [Zero-shot Image-to-Image Translation](https://arxiv.org/abs/2302.03027)
| [StableDiffusionModelEditingPipeline](./model_editing) | **Experimental** – *Text-to-Image Model Editing * | | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://arxiv.org/abs/2303.08084)
+| [StableDiffusionDiffEditPipeline](./diffedit) | **Experimental** – *Text-Based Image Editing * | | [DiffEdit: Diffusion-based semantic image editing with mask guidance](https://arxiv.org/abs/2210.11427)
diff --git a/docs/source/en/training/controlnet.mdx b/docs/source/en/training/controlnet.mdx
index 94e3d969b80a..1c91298477c7 100644
--- a/docs/source/en/training/controlnet.mdx
+++ b/docs/source/en/training/controlnet.mdx
@@ -33,7 +33,12 @@ cd diffusers
pip install -e .
```
-Then navigate into the example folder and run:
+Then navigate into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/controlnet):
+```bash
+cd examples/controlnet
+```
+
+Now run:
```bash
pip install -r requirements.txt
```
diff --git a/docs/source/en/training/custom_diffusion.mdx b/docs/source/en/training/custom_diffusion.mdx
index 08604f101ea2..ee8fb19bd18c 100644
--- a/docs/source/en/training/custom_diffusion.mdx
+++ b/docs/source/en/training/custom_diffusion.mdx
@@ -33,7 +33,13 @@ cd diffusers
pip install -e .
```
-Then cd in the example folder and run
+Then cd into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion):
+
+```bash
+cd examples/custom_diffusion
+```
+
+Now run:
```bash
pip install -r requirements.txt
diff --git a/docs/source/en/training/dreambooth.mdx b/docs/source/en/training/dreambooth.mdx
index c5a5a047d114..09b877c7d0cc 100644
--- a/docs/source/en/training/dreambooth.mdx
+++ b/docs/source/en/training/dreambooth.mdx
@@ -98,7 +98,8 @@ accelerate launch train_dreambooth.py \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
- --max_train_steps=400
+ --max_train_steps=400 \
+ --push_to_hub
```
@@ -161,7 +162,8 @@ accelerate launch train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
@@ -225,7 +227,8 @@ accelerate launch train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
@@ -387,7 +390,8 @@ accelerate launch train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
### 12GB GPU
@@ -418,7 +422,8 @@ accelerate launch train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
### 8 GB GPU
@@ -464,7 +469,8 @@ accelerate launch train_dreambooth.py \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800 \
- --mixed_precision=fp16
+ --mixed_precision=fp16 \
+ --push_to_hub
```
## Inference
diff --git a/docs/source/en/training/instructpix2pix.mdx b/docs/source/en/training/instructpix2pix.mdx
index ff34ec335656..6b6d4d908673 100644
--- a/docs/source/en/training/instructpix2pix.mdx
+++ b/docs/source/en/training/instructpix2pix.mdx
@@ -24,7 +24,7 @@ The output is an "edited" image that reflects the edit instruction applied on th
-The `train_instruct_pix2pix.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.
+The `train_instruct_pix2pix.py` script (you can find it [here](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py)) shows how to implement the training procedure and adapt it for Stable Diffusion.
***Disclaimer: Even though `train_instruct_pix2pix.py` implements the InstructPix2Pix
training procedure while being faithful to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix) we have only tested it on a [small-scale dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can impact the end results. For better results, we recommend longer training runs with a larger dataset. [Here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) you can find a large dataset for InstructPix2Pix training.***
@@ -44,7 +44,12 @@ cd diffusers
pip install -e .
```
-Then cd in the example folder and run
+Then cd into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix):
+```bash
+cd examples/instruct_pix2pix
+```
+
+Now run:
```bash
pip install -r requirements.txt
```
diff --git a/docs/source/zh/_toctree.yml b/docs/source/zh/_toctree.yml
index 2d67d9c4a025..58f6ac09faef 100644
--- a/docs/source/zh/_toctree.yml
+++ b/docs/source/zh/_toctree.yml
@@ -4,51 +4,79 @@
- local: quicktour
title: 快速入门
- local: stable_diffusion
- title: Stable Diffusion
+ title: Effective and efficient diffusion
- local: installation
title: 安装
title: 开始
- sections:
+ - local: tutorials/tutorial_overview
+ title: Overview
+ - local: using-diffusers/write_own_pipeline
+ title: Understanding models and schedulers
- local: tutorials/basic_training
title: Train a diffusion model
title: Tutorials
- sections:
- sections:
+ - local: using-diffusers/loading_overview
+ title: Overview
- local: using-diffusers/loading
- title: Loading Pipelines, Models, and Schedulers
+ title: Load pipelines, models, and schedulers
- local: using-diffusers/schedulers
- title: Using different Schedulers
- - local: using-diffusers/configuration
- title: Configuring Pipelines, Models, and Schedulers
+ title: Load and compare different schedulers
- local: using-diffusers/custom_pipeline_overview
- title: Loading and Adding Custom Pipelines
+ title: Load community pipelines
- local: using-diffusers/kerascv
- title: Using KerasCV Stable Diffusion Checkpoints in Diffusers
+ title: Load KerasCV Stable Diffusion checkpoints
title: Loading & Hub
- sections:
+ - local: using-diffusers/pipeline_overview
+ title: Overview
- local: using-diffusers/unconditional_image_generation
- title: Unconditional Image Generation
+ title: Unconditional image generation
- local: using-diffusers/conditional_image_generation
- title: Text-to-Image Generation
+ title: Text-to-image generation
- local: using-diffusers/img2img
- title: Text-Guided Image-to-Image
+ title: Text-guided image-to-image
- local: using-diffusers/inpaint
- title: Text-Guided Image-Inpainting
+ title: Text-guided image-inpainting
- local: using-diffusers/depth2img
- title: Text-Guided Depth-to-Image
- - local: using-diffusers/controlling_generation
- title: Controlling generation
+ title: Text-guided depth-to-image
- local: using-diffusers/reusing_seeds
- title: Reusing seeds for deterministic generation
+ title: Improve image quality with deterministic generation
- local: using-diffusers/reproducibility
- title: Reproducibility
+ title: Create reproducible pipelines
- local: using-diffusers/custom_pipeline_examples
- title: Community Pipelines
+ title: Community pipelines
- local: using-diffusers/contribute_pipeline
- title: How to contribute a Pipeline
+ title: How to contribute a community pipeline
- local: using-diffusers/using_safetensors
title: Using safetensors
+ - local: using-diffusers/stable_diffusion_jax_how_to
+ title: Stable Diffusion in JAX/Flax
+ - local: using-diffusers/weighted_prompts
+ title: Weighting Prompts
title: Pipelines for Inference
+ - sections:
+ - local: training/overview
+ title: Overview
+ - local: training/unconditional_training
+ title: Unconditional image generation
+ - local: training/text_inversion
+ title: Textual Inversion
+ - local: training/dreambooth
+ title: DreamBooth
+ - local: training/text2image
+ title: Text-to-image
+ - local: training/lora
+ title: Low-Rank Adaptation of Large Language Models (LoRA)
+ - local: training/controlnet
+ title: ControlNet
+ - local: training/instructpix2pix
+ title: InstructPix2Pix Training
+ - local: training/custom_diffusion
+ title: Custom Diffusion
+ title: Training
- sections:
- local: using-diffusers/rl
title: Reinforcement Learning
@@ -59,6 +87,8 @@
title: Taking Diffusers Beyond Images
title: Using Diffusers
- sections:
+ - local: optimization/opt_overview
+ title: Overview
- local: optimization/fp16
title: Memory and Speed
- local: optimization/torch2.0
@@ -69,32 +99,26 @@
title: ONNX
- local: optimization/open_vino
title: OpenVINO
+ - local: optimization/coreml
+ title: Core ML
- local: optimization/mps
title: MPS
- local: optimization/habana
title: Habana Gaudi
+ - local: optimization/tome
+ title: Token Merging
title: Optimization/Special Hardware
-- sections:
- - local: training/overview
- title: Overview
- - local: training/unconditional_training
- title: Unconditional Image Generation
- - local: training/text_inversion
- title: Textual Inversion
- - local: training/dreambooth
- title: DreamBooth
- - local: training/text2image
- title: Text-to-image
- - local: training/lora
- title: Low-Rank Adaptation of Large Language Models (LoRA)
- title: Training
- sections:
- local: conceptual/philosophy
title: Philosophy
+ - local: using-diffusers/controlling_generation
+ title: Controlled generation
- local: conceptual/contribution
title: How to contribute?
- local: conceptual/ethical_guidelines
title: Diffusers' Ethical Guidelines
+ - local: conceptual/evaluation
+ title: Evaluating Diffusion Models
title: Conceptual Guides
- sections:
- sections:
@@ -118,6 +142,8 @@
title: AltDiffusion
- local: api/pipelines/audio_diffusion
title: Audio Diffusion
+ - local: api/pipelines/audioldm
+ title: AudioLDM
- local: api/pipelines/cycle_diffusion
title: Cycle Diffusion
- local: api/pipelines/dance_diffusion
@@ -128,6 +154,8 @@
title: DDPM
- local: api/pipelines/dit
title: DiT
+ - local: api/pipelines/if
+ title: IF
- local: api/pipelines/latent_diffusion
title: Latent Diffusion
- local: api/pipelines/paint_by_example
@@ -142,6 +170,8 @@
title: Score SDE VE
- local: api/pipelines/semantic_stable_diffusion
title: Semantic Guidance
+ - local: api/pipelines/spectrogram_diffusion
+ title: "Spectrogram Diffusion"
- sections:
- local: api/pipelines/stable_diffusion/overview
title: Overview
@@ -171,6 +201,8 @@
title: MultiDiffusion Panorama
- local: api/pipelines/stable_diffusion/controlnet
title: Text-to-Image Generation with ControlNet Conditioning
+ - local: api/pipelines/stable_diffusion/model_editing
+ title: Text-to-Image Model Editing
title: Stable Diffusion
- local: api/pipelines/stable_diffusion_2
title: Stable Diffusion 2
@@ -178,6 +210,10 @@
title: Stable unCLIP
- local: api/pipelines/stochastic_karras_ve
title: Stochastic Karras VE
+ - local: api/pipelines/text_to_video
+ title: Text-to-Video
+ - local: api/pipelines/text_to_video_zero
+ title: Text-to-Video Zero
- local: api/pipelines/unclip
title: UnCLIP
- local: api/pipelines/latent_diffusion_uncond
@@ -235,4 +271,4 @@
- local: api/experimental/rl
title: RL Planning
title: Experimental Features
- title: API
+ title: API
\ No newline at end of file
diff --git a/docs/source/zh/index.mdx b/docs/source/zh/index.mdx
index 4f952c5db79c..e1a2a3971d87 100644
--- a/docs/source/zh/index.mdx
+++ b/docs/source/zh/index.mdx
@@ -18,61 +18,84 @@ specific language governing permissions and limitations under the License.
# 🧨 Diffusers
-🤗Diffusers提供了预训练好的视觉和音频扩散模型,并可以作为推理和训练的模块化工具箱。
+🤗 Diffusers 是一个值得首选用于生成图像、音频甚至 3D 分子结构的,最先进的预训练扩散模型库。
+无论您是在寻找简单的推理解决方案,还是想训练自己的扩散模型,🤗 Diffusers 这一模块化工具箱都能对其提供支持。
+本库的设计更偏重于[可用而非高性能](conceptual/philosophy#usability-over-performance)、[简明而非简单](conceptual/philosophy#simple-over-easy)以及[易用而非抽象](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction)。
-更准确地说,🤗Diffusers提供了:
-- 最先进的扩散管道,可以在推理中仅用几行代码运行(详情看[**Using Diffusers**](./using-diffusers/conditional_image_generation))或看[**管道**](#pipelines) 以获取所有支持的管道及其对应的论文的概述。
-- 可以在推理中交替使用的各种噪声调度程序,以便在推理过程中权衡如何选择速度和质量。有关更多信息,可以看[**Schedulers**](./api/schedulers/overview)。
-- 多种类型的模型,如U-Net,可用作端到端扩散系统中的构建模块。有关更多详细信息,可以看 [**Models**](./api/models) 。
-- 训练示例,展示如何训练最流行的扩散模型任务。更多相关信息,可以看[**Training**](./training/overview)。
+本库包含三个主要组件:
+- 最先进的扩散管道 [diffusion pipelines](api/pipelines/overview),只需几行代码即可进行推理。
+- 可交替使用的各种噪声调度器 [noise schedulers](api/schedulers/overview),用于平衡生成速度和质量。
+- 预训练模型 [models](api/models),可作为构建模块,并与调度程序结合使用,来创建您自己的端到端扩散系统。
-## 🧨 Diffusers pipelines
-
-下表总结了所有官方支持的pipelines及其对应的论文,部分提供了colab,可以直接尝试一下。
+
+## 🧨 Diffusers pipelines
-| 管道 | 论文 | 任务 | Colab
-|---|---|:---:|:---:|
-| [alt_diffusion](./api/pipelines/alt_diffusion) | [**AltDiffusion**](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
-| [audio_diffusion](./api/pipelines/audio_diffusion) | [**Audio Diffusion**](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation | [](https://colab.research.google.com/github/teticio/audio-diffusion/blob/master/notebooks/audio_diffusion_pipeline.ipynb)
-| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [**ControlNet with Stable Diffusion**](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
-| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [**Cycle Diffusion**](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
-| [dance_diffusion](./api/pipelines/dance_diffusion) | [**Dance Diffusion**](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
-| [ddpm](./api/pipelines/ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
-| [ddim](./api/pipelines/ddim) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
-| [latent_diffusion](./api/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
-| [latent_diffusion](./api/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
-| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
-| [paint_by_example](./api/pipelines/paint_by_example) | [**Paint by Example: Exemplar-based Image Editing with Diffusion Models**](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
-| [pndm](./api/pipelines/pndm) | [**Pseudo Numerical Methods for Diffusion Models on Manifolds**](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
-| [score_sde_ve](./api/pipelines/score_sde_ve) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
-| [score_sde_vp](./api/pipelines/score_sde_vp) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
-| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [**Semantic Guidance**](https://arxiv.org/abs/2301.12247) | Text-Guided Generation | [](https://colab.research.google.com/github/ml-research/semantic-image-editing/blob/main/examples/SemanticGuidance.ipynb)
-| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
-| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
-| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
-| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [**MultiDiffusion**](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
-| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [**InstructPix2Pix**](https://github.com/timothybrooks/instruct-pix2pix) | Text-Guided Image Editing|
-| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [**Zero-shot Image-to-Image Translation**](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
-| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [**Attend and Excite for Stable Diffusion**](https://attendandexcite.github.io/Attend-and-Excite/) | Text-to-Image Generation |
-| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [**Self-Attention Guidance**](https://ku-cvlab.github.io/Self-Attention-Guidance) | Text-to-Image Generation |
-| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [**Stable Diffusion Image Variations**](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
-| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [**Stable Diffusion Latent Upscaler**](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
-| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
-| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
-| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Depth-Conditional Stable Diffusion**](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
-| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
-| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [**Safe Stable Diffusion**](https://arxiv.org/abs/2211.05105) | Text-Guided Generation | [](https://colab.research.google.com/github/ml-research/safe-latent-diffusion/blob/main/examples/Safe%20Latent%20Diffusion.ipynb)
-| [stable_unclip](./stable_unclip) | **Stable unCLIP** | Text-to-Image Generation |
-| [stable_unclip](./stable_unclip) | **Stable unCLIP** | Image-to-Image Text-Guided Generation |
-| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [**Elucidating the Design Space of Diffusion-Based Generative Models**](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
-| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125) | Text-to-Image Generation |
-| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
-| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
-| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
-| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
-
+下表汇总了当前所有官方支持的pipelines及其对应的论文.
-**注意**: 管道是如何使用相应论文中提出的扩散模型的简单示例。
\ No newline at end of file
+| 管道 | 论文/仓库 | 任务 |
+|---|---|:---:|
+| [alt_diffusion](./api/pipelines/alt_diffusion) | [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
+| [audio_diffusion](./api/pipelines/audio_diffusion) | [Audio Diffusion](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation |
+| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
+| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
+| [dance_diffusion](./api/pipelines/dance_diffusion) | [Dance Diffusion](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
+| [ddpm](./api/pipelines/ddpm) | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
+| [ddim](./api/pipelines/ddim) | [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
+| [if](./if) | [**IF**](./api/pipelines/if) | Image Generation |
+| [if_img2img](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
+| [if_inpainting](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
+| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
+| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
+| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
+| [paint_by_example](./api/pipelines/paint_by_example) | [Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
+| [pndm](./api/pipelines/pndm) | [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
+| [score_sde_ve](./api/pipelines/score_sde_ve) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
+| [score_sde_vp](./api/pipelines/score_sde_vp) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
+| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [Semantic Guidance](https://arxiv.org/abs/2301.12247) | Text-Guided Generation |
+| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation |
+| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation |
+| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting |
+| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [MultiDiffusion](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
+| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://arxiv.org/abs/2211.09800) | Text-Guided Image Editing|
+| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [Zero-shot Image-to-Image Translation](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
+| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://arxiv.org/abs/2301.13826) | Text-to-Image Generation |
+| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://arxiv.org/abs/2210.00939) | Text-to-Image Generation, Unconditional Image Generation |
+| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [Stable Diffusion Image Variations](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
+| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [Stable Diffusion Latent Upscaler](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
+| [stable_diffusion_model_editing](./api/pipelines/stable_diffusion/model_editing) | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://time-diffusion.github.io/) | Text-to-Image Model Editing |
+| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
+| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
+| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Depth-Conditional Stable Diffusion](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
+| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
+| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [Safe Stable Diffusion](https://arxiv.org/abs/2211.05105) | Text-Guided Generation |
+| [stable_unclip](./stable_unclip) | Stable unCLIP | Text-to-Image Generation |
+| [stable_unclip](./stable_unclip) | Stable unCLIP | Image-to-Image Text-Guided Generation |
+| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
+| [text_to_video_sd](./api/pipelines/text_to_video) | [Modelscope's Text-to-video-synthesis Model in Open Domain](https://modelscope.cn/models/damo/text-to-video-synthesis/summary) | Text-to-Video Generation |
+| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125)(implementation by [kakaobrain](https://github.com/kakaobrain/karlo)) | Text-to-Image Generation |
+| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
+| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
+| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
+| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
diff --git a/docs/source/zh/installation.mdx b/docs/source/zh/installation.mdx
index cda91df8a6cd..8cd3ad97cc21 100644
--- a/docs/source/zh/installation.mdx
+++ b/docs/source/zh/installation.mdx
@@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.
# 安装
-安装🤗 Diffusers 到你正在使用的任何深度学习框架中。
+在你正在使用的任意深度学习框架中安装 🤗 Diffusers 。
🤗 Diffusers已在Python 3.7+、PyTorch 1.7.0+和Flax上进行了测试。按照下面的安装说明,针对你正在使用的深度学习框架进行安装:
@@ -21,11 +21,11 @@ specific language governing permissions and limitations under the License.
## 使用pip安装
-你需要在[虚拟环境](https://docs.python.org/3/library/venv.html)中安装🤗 Diffusers 。
+你需要在[虚拟环境](https://docs.python.org/3/library/venv.html)中安装 🤗 Diffusers 。
如果你对 Python 虚拟环境不熟悉,可以看看这个[教程](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
-使用虚拟环境你可以轻松管理不同的项目,避免了依赖项之间的兼容性问题。
+在虚拟环境中,你可以轻松管理不同的项目,避免依赖项之间的兼容性问题。
首先,在你的项目目录下创建一个虚拟环境:
@@ -39,7 +39,7 @@ python -m venv .env
source .env/bin/activate
```
-现在你就可以安装 🤗 Diffusers了!使用下边这个命令:
+现在,你就可以安装 🤗 Diffusers了!使用下边这个命令:
**PyTorch**
@@ -55,7 +55,7 @@ pip install diffusers["flax"]
## 从源代码安装
-在从源代码安装 `diffusers` 之前,你先确定你已经安装了 `torch` 和 `accelerate`。
+在从源代码安装 `diffusers` 之前,确保你已经安装了 `torch` 和 `accelerate`。
`torch`的安装教程可以看 `torch` [文档](https://pytorch.org/get-started/locally/#start-locally).
@@ -65,17 +65,17 @@ pip install diffusers["flax"]
pip install accelerate
```
-从源码安装 🤗 Diffusers 使用以下命令:
+从源码安装 🤗 Diffusers 需要使用以下命令:
```bash
pip install git+https://github.com/huggingface/diffusers
```
这个命令安装的是最新的 `main`版本,而不是最近的`stable`版。
-`main`是一直和最新进展保持一致的。比如,上次正式版发布了,有bug,新的正式版还没推出,但是`main`中可以看到这个bug被修复了。
-但是这也意味着 `main`版本并不总是稳定的。
+`main`是一直和最新进展保持一致的。比如,上次发布的正式版中有bug,在`main`中可以看到这个bug被修复了,但是新的正式版此时尚未推出。
+但是这也意味着 `main`版本不保证是稳定的。
-我们努力保持`main`版本正常运行,大多数问题都能在几个小时或一天之内解决
+我们努力保持`main`版本正常运行,大多数问题都能在几个小时或一天之内解决
如果你遇到了问题,可以提 [Issue](https://github.com/huggingface/transformers/issues),这样我们就能更快修复问题了。
@@ -105,8 +105,8 @@ pip install -e ".[torch]"
pip install -e ".[flax]"
```
-这些命令将连接你克隆的版本库和你的 Python 库路径。
-现在,除了正常的库路径外,Python 还会在你克隆的文件夹内寻找。
+这些命令将连接到你克隆的版本库和你的 Python 库路径。
+现在,不只是在通常的库路径,Python 还会在你克隆的文件夹内寻找包。
例如,如果你的 Python 包通常安装在 `~/anaconda3/envs/main/lib/python3.7/Site-packages/`,Python 也会搜索你克隆到的文件夹。`~/diffusers/`。
@@ -116,32 +116,31 @@ pip install -e ".[flax]"
-现在你可以用下面的命令轻松地将你克隆的🤗Diffusers仓库更新到最新版本。
+现在你可以用下面的命令轻松地将你克隆的 🤗 Diffusers 库更新到最新版本。
```bash
cd ~/diffusers/
git pull
```
-你的Python环境将在下次运行时找到`main`版本的🤗 Diffusers。
+你的Python环境将在下次运行时找到`main`版本的 🤗 Diffusers。
-## 注意遥测日志
+## 注意 Telemetry 日志
-我们的库会在使用`from_pretrained()`请求期间收集信息。这些数据包括Diffusers和PyTorch/Flax的版本,请求的模型或管道,以及预训练检查点的路径(如果它被托管在Hub上)。
+我们的库会在使用`from_pretrained()`请求期间收集 telemetry 信息。这些数据包括Diffusers和PyTorch/Flax的版本,请求的模型或管道类,以及预训练检查点的路径(如果它被托管在Hub上的话)。
+这些使用数据有助于我们调试问题并确定新功能的开发优先级。
+Telemetry 数据仅在从 HuggingFace Hub 中加载模型和管道时发送,而不会在本地使用期间收集。
-这些使用数据有助于我们调试问题并优先考虑新功能。
-当从HuggingFace Hub加载模型和管道时才会发送遥测数据,并且在本地使用时不会收集数据。
+我们知道,并不是每个人都想分享这些信息,我们尊重您的隐私,
+因此您可以通过在终端中设置 `DISABLE_TELEMETRY` 环境变量从而禁用 Telemetry 数据收集:
-我们知道并不是每个人都想分享这些的信息,我们尊重您的隐私,
-因此您可以通过在终端中设置“DISABLE_TELEMETRY”环境变量来禁用遥测数据的收集:
-
-在Linux/MacOS中:
+Linux/MacOS:
```bash
export DISABLE_TELEMETRY=YES
```
-在Windows中:
+Windows:
```bash
set DISABLE_TELEMETRY=YES
```
\ No newline at end of file
diff --git a/examples/community/stable_diffusion_controlnet_inpaint.py b/examples/community/stable_diffusion_controlnet_inpaint.py
index c47f4c3194e8..aae199f91b9e 100644
--- a/examples/community/stable_diffusion_controlnet_inpaint.py
+++ b/examples/community/stable_diffusion_controlnet_inpaint.py
@@ -1,7 +1,7 @@
# Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
import inspect
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
@@ -11,6 +11,7 @@
from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
PIL_INTERPOLATION,
@@ -184,7 +185,14 @@ def prepare_mask_image(mask_image):
def prepare_controlnet_conditioning_image(
- controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype
+ controlnet_conditioning_image,
+ width,
+ height,
+ batch_size,
+ num_images_per_prompt,
+ device,
+ dtype,
+ do_classifier_free_guidance,
):
if not isinstance(controlnet_conditioning_image, torch.Tensor):
if isinstance(controlnet_conditioning_image, PIL.Image.Image):
@@ -214,6 +222,9 @@ def prepare_controlnet_conditioning_image(
controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)
+ if do_classifier_free_guidance:
+ controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
+
return controlnet_conditioning_image
@@ -230,7 +241,7 @@ def __init__(
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
- controlnet: ControlNetModel,
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
@@ -254,6 +265,9 @@ def __init__(
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
+ if isinstance(controlnet, (list, tuple)):
+ controlnet = MultiControlNetModel(controlnet)
+
self.register_modules(
vae=vae,
text_encoder=text_encoder,
@@ -264,6 +278,7 @@ def __init__(
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.register_to_config(requires_safety_checker=requires_safety_checker)
@@ -522,6 +537,42 @@ def prepare_extra_step_kwargs(self, generator, eta):
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
+ def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds):
+ image_is_pil = isinstance(image, PIL.Image.Image)
+ image_is_tensor = isinstance(image, torch.Tensor)
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
+
+ if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:
+ raise TypeError(
+ "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
+ )
+
+ if image_is_pil:
+ image_batch_size = 1
+ elif image_is_tensor:
+ image_batch_size = image.shape[0]
+ elif image_is_pil_list:
+ image_batch_size = len(image)
+ elif image_is_tensor_list:
+ image_batch_size = len(image)
+ else:
+ raise ValueError("controlnet condition image is not valid")
+
+ if prompt is not None and isinstance(prompt, str):
+ prompt_batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ prompt_batch_size = len(prompt)
+ elif prompt_embeds is not None:
+ prompt_batch_size = prompt_embeds.shape[0]
+ else:
+ raise ValueError("prompt or prompt_embeds are not valid")
+
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
+ raise ValueError(
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
+ )
+
def check_inputs(
self,
prompt,
@@ -534,6 +585,7 @@ def check_inputs(
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
+ controlnet_conditioning_scale=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
@@ -572,45 +624,35 @@ def check_inputs(
f" {negative_prompt_embeds.shape}."
)
- controlnet_cond_image_is_pil = isinstance(controlnet_conditioning_image, PIL.Image.Image)
- controlnet_cond_image_is_tensor = isinstance(controlnet_conditioning_image, torch.Tensor)
- controlnet_cond_image_is_pil_list = isinstance(controlnet_conditioning_image, list) and isinstance(
- controlnet_conditioning_image[0], PIL.Image.Image
- )
- controlnet_cond_image_is_tensor_list = isinstance(controlnet_conditioning_image, list) and isinstance(
- controlnet_conditioning_image[0], torch.Tensor
- )
-
- if (
- not controlnet_cond_image_is_pil
- and not controlnet_cond_image_is_tensor
- and not controlnet_cond_image_is_pil_list
- and not controlnet_cond_image_is_tensor_list
- ):
- raise TypeError(
- "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
- )
-
- if controlnet_cond_image_is_pil:
- controlnet_cond_image_batch_size = 1
- elif controlnet_cond_image_is_tensor:
- controlnet_cond_image_batch_size = controlnet_conditioning_image.shape[0]
- elif controlnet_cond_image_is_pil_list:
- controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
- elif controlnet_cond_image_is_tensor_list:
- controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
-
- if prompt is not None and isinstance(prompt, str):
- prompt_batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- prompt_batch_size = len(prompt)
- elif prompt_embeds is not None:
- prompt_batch_size = prompt_embeds.shape[0]
-
- if controlnet_cond_image_batch_size != 1 and controlnet_cond_image_batch_size != prompt_batch_size:
- raise ValueError(
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {controlnet_cond_image_batch_size}, prompt batch size: {prompt_batch_size}"
- )
+ # check controlnet condition image
+ if isinstance(self.controlnet, ControlNetModel):
+ self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds)
+ elif isinstance(self.controlnet, MultiControlNetModel):
+ if not isinstance(controlnet_conditioning_image, list):
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
+ if len(controlnet_conditioning_image) != len(self.controlnet.nets):
+ raise ValueError(
+ "For multiple controlnets: `image` must have the same length as the number of controlnets."
+ )
+ for image_ in controlnet_conditioning_image:
+ self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds)
+ else:
+ assert False
+
+ # Check `controlnet_conditioning_scale`
+ if isinstance(self.controlnet, ControlNetModel):
+ if not isinstance(controlnet_conditioning_scale, float):
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
+ elif isinstance(self.controlnet, MultiControlNetModel):
+ if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
+ self.controlnet.nets
+ ):
+ raise ValueError(
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
+ " the same length as the number of controlnets"
+ )
+ else:
+ assert False
if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor):
raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor")
@@ -630,6 +672,8 @@ def check_inputs(
image_channels, image_height, image_width = image.shape
elif image.ndim == 4:
image_batch_size, image_channels, image_height, image_width = image.shape
+ else:
+ assert False
if mask_image.ndim == 2:
mask_image_batch_size = 1
@@ -797,7 +841,7 @@ def __call__(
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- controlnet_conditioning_scale: float = 1.0,
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
):
r"""
Function invoked when calling the pipeline for generation.
@@ -897,6 +941,7 @@ def __call__(
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
+ controlnet_conditioning_scale,
)
# 2. Define call parameters
@@ -913,6 +958,9 @@ def __call__(
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
+ if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)
+
# 3. Encode input prompt
prompt_embeds = self._encode_prompt(
prompt,
@@ -929,15 +977,37 @@ def __call__(
mask_image = prepare_mask_image(mask_image)
- controlnet_conditioning_image = prepare_controlnet_conditioning_image(
- controlnet_conditioning_image,
- width,
- height,
- batch_size * num_images_per_prompt,
- num_images_per_prompt,
- device,
- self.controlnet.dtype,
- )
+ # condition image(s)
+ if isinstance(self.controlnet, ControlNetModel):
+ controlnet_conditioning_image = prepare_controlnet_conditioning_image(
+ controlnet_conditioning_image=controlnet_conditioning_image,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=self.controlnet.dtype,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ )
+ elif isinstance(self.controlnet, MultiControlNetModel):
+ controlnet_conditioning_images = []
+
+ for image_ in controlnet_conditioning_image:
+ image_ = prepare_controlnet_conditioning_image(
+ controlnet_conditioning_image=image_,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=self.controlnet.dtype,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ )
+ controlnet_conditioning_images.append(image_)
+
+ controlnet_conditioning_image = controlnet_conditioning_images
+ else:
+ assert False
masked_image = image * (mask_image < 0.5)
@@ -979,9 +1049,6 @@ def __call__(
do_classifier_free_guidance,
)
- if do_classifier_free_guidance:
- controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
-
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
@@ -1007,15 +1074,10 @@ def __call__(
t,
encoder_hidden_states=prompt_embeds,
controlnet_cond=controlnet_conditioning_image,
+ conditioning_scale=controlnet_conditioning_scale,
return_dict=False,
)
- down_block_res_samples = [
- down_block_res_sample * controlnet_conditioning_scale
- for down_block_res_sample in down_block_res_samples
- ]
- mid_block_res_sample *= controlnet_conditioning_scale
-
# predict the noise residual
noise_pred = self.unet(
inpainting_latent_model_input,
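Note on the ControlNet pipeline changes above: `controlnet_conditioning_scale` now accepts either a single `float` or a list with one entry per ControlNet, and a lone float is broadcast across all ControlNets. A minimal sketch of that broadcasting rule in isolation (the standalone helper and its name are illustrative, not part of the pipeline):

```py
from typing import List, Union


def normalize_conditioning_scale(scale: Union[float, List[float]], num_controlnets: int) -> List[float]:
    """Broadcast a single float to one scale per ControlNet, or validate a per-ControlNet list."""
    if isinstance(scale, float):
        return [scale] * num_controlnets
    if isinstance(scale, list):
        if len(scale) != num_controlnets:
            raise ValueError(
                "When `controlnet_conditioning_scale` is a list, it must have the same length "
                "as the number of controlnets."
            )
        return scale
    raise TypeError("`controlnet_conditioning_scale` must be a float or a list of floats.")


print(normalize_conditioning_scale(1.0, 2))         # [1.0, 1.0]
print(normalize_conditioning_scale([1.0, 0.5], 2))  # [1.0, 0.5]
```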
diff --git a/examples/dreambooth/README.md b/examples/dreambooth/README.md
index e1eb8a06b0ff..490e31458988 100644
--- a/examples/dreambooth/README.md
+++ b/examples/dreambooth/README.md
@@ -80,7 +80,8 @@ accelerate launch train_dreambooth.py \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
- --max_train_steps=400
+ --max_train_steps=400 \
+ --push_to_hub
```
### Training with prior-preservation loss
@@ -109,7 +110,8 @@ accelerate launch train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
@@ -141,7 +143,8 @@ accelerate launch train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
@@ -176,7 +179,8 @@ accelerate launch train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
@@ -218,7 +222,8 @@ accelerate launch --mixed_precision="fp16" train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
### Fine-tune text encoder with the UNet.
@@ -251,7 +256,8 @@ accelerate launch train_dreambooth.py \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
- --max_train_steps=800
+ --max_train_steps=800 \
+ --push_to_hub
```
### Using DreamBooth for pipelines other than Stable Diffusion
diff --git a/examples/dreambooth/train_dreambooth.py b/examples/dreambooth/train_dreambooth.py
index 593af005d6f4..190f4625a16c 100644
--- a/examples/dreambooth/train_dreambooth.py
+++ b/examples/dreambooth/train_dreambooth.py
@@ -61,6 +61,39 @@
logger = get_logger(__name__)
+def save_model_card(repo_id: str, images=None, base_model: str = None, train_text_encoder=False, prompt: str = None, repo_folder=None):
+ img_str = ""
+ for i, image in enumerate(images):
+ image.save(os.path.join(repo_folder, f"image_{i}.png"))
+ img_str += f"\n"
+
+ yaml = f"""
+---
+license: creativeml-openrail-m
+base_model: {base_model}
+instance_prompt: {prompt}
+tags:
+- stable-diffusion
+- stable-diffusion-diffusers
+- text-to-image
+- diffusers
+- dreambooth
+inference: true
+---
+ """
+ model_card = f"""
+# DreamBooth - {repo_id}
+
+This is a dreambooth model derived from {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/).
+You can find some example images below.\n
+{img_str}
+
+DreamBooth for the text encoder was enabled: {train_text_encoder}.
+"""
+ with open(os.path.join(repo_folder, "README.md"), "w") as f:
+ f.write(yaml + model_card)
+
+
def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch):
logger.info(
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
@@ -104,6 +137,8 @@ def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight
del pipeline
torch.cuda.empty_cache()
+ return images
+
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
@@ -997,13 +1032,16 @@ def load_model_hook(models, input_dir):
global_step += 1
if accelerator.is_main_process:
+ images = []
if global_step % args.checkpointing_steps == 0:
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if args.validation_prompt is not None and global_step % args.validation_steps == 0:
- log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch)
+ images = log_validation(
+ text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch
+ )
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
@@ -1024,6 +1062,14 @@ def load_model_hook(models, input_dir):
pipeline.save_pretrained(args.output_dir)
if args.push_to_hub:
+ save_model_card(
+ repo_id,
+ images=images,
+ base_model=args.pretrained_model_name_or_path,
+ train_text_encoder=args.train_text_encoder,
+ prompt=args.instance_prompt,
+ repo_folder=args.output_dir,
+ )
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
diff --git a/setup.py b/setup.py
index c0df285dcffb..13c93dcae3c0 100644
--- a/setup.py
+++ b/setup.py
@@ -95,7 +95,6 @@
"Jinja2",
"k-diffusion>=0.0.12",
"librosa",
- "note-seq",
"numpy",
"parameterized",
"protobuf>=3.20.3,<4",
@@ -191,7 +190,6 @@ def run(self):
"Jinja2",
"k-diffusion",
"librosa",
- "note-seq",
"parameterized",
"pytest",
"pytest-timeout",
diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py
index 078d03eb8995..a8293ea77fef 100644
--- a/src/diffusers/__init__.py
+++ b/src/diffusers/__init__.py
@@ -134,6 +134,7 @@
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
+ StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
diff --git a/src/diffusers/dependency_versions_table.py b/src/diffusers/dependency_versions_table.py
index 1269cf1578a6..0e714accacd6 100644
--- a/src/diffusers/dependency_versions_table.py
+++ b/src/diffusers/dependency_versions_table.py
@@ -19,7 +19,6 @@
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"librosa": "librosa",
- "note-seq": "note-seq",
"numpy": "numpy",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index 8e537c6f3680..fb5f6f48b324 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -71,6 +71,7 @@ def __init__(
self.proj_attn = nn.Linear(channels, channels, bias=True)
self._use_memory_efficient_attention_xformers = False
+ self._use_2_0_attn = True
self._attention_op = None
def reshape_heads_to_batch_dim(self, tensor, merge_head_and_batch=True):
@@ -142,9 +143,8 @@ def forward(self, hidden_states):
scale = 1 / math.sqrt(self.channels / self.num_heads)
- use_torch_2_0_attn = (
- hasattr(F, "scaled_dot_product_attention") and not self._use_memory_efficient_attention_xformers
- )
+ _use_2_0_attn = self._use_2_0_attn and not self._use_memory_efficient_attention_xformers
+ use_torch_2_0_attn = hasattr(F, "scaled_dot_product_attention") and _use_2_0_attn
query_proj = self.reshape_heads_to_batch_dim(query_proj, merge_head_and_batch=not use_torch_2_0_attn)
key_proj = self.reshape_heads_to_batch_dim(key_proj, merge_head_and_batch=not use_torch_2_0_attn)
diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py
index 521e99fdd69c..be8e1e5a6ed1 100644
--- a/src/diffusers/models/modeling_utils.py
+++ b/src/diffusers/models/modeling_utils.py
@@ -392,6 +392,12 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
+ offload_folder (`str` or `os.PathLike`, *optional*):
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+ offload_state_dict (`bool`, *optional*):
+ If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU
+ RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to
+ `True` when there is some disk offload.
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
@@ -433,6 +439,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
torch_dtype = kwargs.pop("torch_dtype", None)
subfolder = kwargs.pop("subfolder", None)
device_map = kwargs.pop("device_map", None)
+ offload_folder = kwargs.pop("offload_folder", None)
+ offload_state_dict = kwargs.pop("offload_state_dict", False)
low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
variant = kwargs.pop("variant", None)
use_safetensors = kwargs.pop("use_safetensors", None)
@@ -504,6 +512,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
revision=revision,
subfolder=subfolder,
device_map=device_map,
+ offload_folder=offload_folder,
+ offload_state_dict=offload_state_dict,
user_agent=user_agent,
**kwargs,
)
@@ -607,7 +617,14 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
else: # else let accelerate handle loading and dispatching.
# Load weights and dispatch according to the device_map
# by default the device_map is None and the weights are loaded on the CPU
- accelerate.load_checkpoint_and_dispatch(model, model_file, device_map, dtype=torch_dtype)
+ accelerate.load_checkpoint_and_dispatch(
+ model,
+ model_file,
+ device_map,
+ offload_folder=offload_folder,
+ offload_state_dict=offload_state_dict,
+ dtype=torch_dtype,
+ )
loading_info = {
"missing_keys": [],
diff --git a/src/diffusers/optimization.py b/src/diffusers/optimization.py
index 657e085062e0..78d68b7978a9 100644
--- a/src/diffusers/optimization.py
+++ b/src/diffusers/optimization.py
@@ -34,6 +34,7 @@ class SchedulerType(Enum):
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
+ PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
@@ -77,6 +78,48 @@ def lr_lambda(current_step: int):
return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
+def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
+ """
+ Create a schedule with a piecewise constant learning rate, scaling the learning rate set in the optimizer by the given step rules.
+
+ Args:
+ optimizer ([`~torch.optim.Optimizer`]):
+ The optimizer for which to schedule the learning rate.
+ step_rules (`str`):
+ The rules for the learning rate multiplier. For example, step_rules="1:10,0.1:20,0.01:30,0.005" means
+ the learning rate is multiplied by 1 until step 10, by 0.1 until step 20, by 0.01 until step 30, and
+ by 0.005 for all remaining steps.
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+
+ Return:
+ `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+ """
+
+ rules_dict = {}
+ rule_list = step_rules.split(",")
+ for rule_str in rule_list[:-1]:
+ value_str, steps_str = rule_str.split(":")
+ steps = int(steps_str)
+ value = float(value_str)
+ rules_dict[steps] = value
+ last_lr_multiple = float(rule_list[-1])
+
+ def create_rules_function(rules_dict, last_lr_multiple):
+ def rule_func(steps: int) -> float:
+ sorted_steps = sorted(rules_dict.keys())
+ for i, sorted_step in enumerate(sorted_steps):
+ if steps < sorted_step:
+ return rules_dict[sorted_steps[i]]
+ return last_lr_multiple
+
+ return rule_func
+
+ rules_func = create_rules_function(rules_dict, last_lr_multiple)
+
+ return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
+
+
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
@@ -232,12 +275,14 @@ def lr_lambda(current_step: int):
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
+ SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
name: Union[str, SchedulerType],
optimizer: Optimizer,
+ step_rules: Optional[str] = None,
num_warmup_steps: Optional[int] = None,
num_training_steps: Optional[int] = None,
num_cycles: int = 1,
@@ -252,6 +297,8 @@ def get_scheduler(
The name of the scheduler to use.
optimizer (`torch.optim.Optimizer`):
The optimizer that will be used during training.
+ step_rules (`str`, *optional*):
+ A string representing the step rules to use. This is only used by the `PIECEWISE_CONSTANT` scheduler.
num_warmup_steps (`int`, *optional*):
The number of warmup steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
@@ -270,6 +317,9 @@ def get_scheduler(
if name == SchedulerType.CONSTANT:
return schedule_func(optimizer, last_epoch=last_epoch)
+ if name == SchedulerType.PIECEWISE_CONSTANT:
+ return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
+
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py
index 10da653a1377..3cddad4a6b26 100644
--- a/src/diffusers/pipelines/__init__.py
+++ b/src/diffusers/pipelines/__init__.py
@@ -60,6 +60,7 @@
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
+ StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py
index 5e4290e8db9f..01eed74636cb 100644
--- a/src/diffusers/pipelines/pipeline_utils.py
+++ b/src/diffusers/pipelines/pipeline_utils.py
@@ -355,6 +355,8 @@ def load_sub_model(
provider: Any,
sess_options: Any,
device_map: Optional[Union[Dict[str, torch.device], str]],
+ offload_folder: Optional[Union[str, os.PathLike]],
+ offload_state_dict: bool,
model_variants: Dict[str, str],
name: str,
from_flax: bool,
@@ -417,6 +419,8 @@ def load_sub_model(
# This makes sure that the weights won't be initialized which significantly speeds up loading.
if is_diffusers_model or is_transformers_model:
loading_kwargs["device_map"] = device_map
+ loading_kwargs["offload_folder"] = offload_folder
+ loading_kwargs["offload_state_dict"] = offload_state_dict
loading_kwargs["variant"] = model_variants.pop(name, None)
if from_flax:
loading_kwargs["from_flax"] = True
@@ -809,6 +813,12 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
+ offload_folder (`str` or `os.PathLike`, *optional*):
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+ offload_state_dict (`bool`, *optional*):
+ If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU
+ RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to
+ `True` when there is some disk offload.
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
@@ -874,6 +884,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
provider = kwargs.pop("provider", None)
sess_options = kwargs.pop("sess_options", None)
device_map = kwargs.pop("device_map", None)
+ offload_folder = kwargs.pop("offload_folder", None)
+ offload_state_dict = kwargs.pop("offload_state_dict", False)
low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
variant = kwargs.pop("variant", None)
use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
@@ -1047,6 +1059,8 @@ def load_module(name, value):
provider=provider,
sess_options=sess_options,
device_map=device_map,
+ offload_folder=offload_folder,
+ offload_state_dict=offload_state_dict,
model_variants=model_variants,
name=name,
from_flax=from_flax,
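The same two kwargs are now accepted at the pipeline level and forwarded to every diffusers and transformers sub-model. A minimal sketch, again with an illustrative model ID; `device_map="auto"` (documented in the hunk above) is what makes disk offload possible in the first place:

```py
import torch

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # illustrative checkpoint
    torch_dtype=torch.float16,
    device_map="auto",
    offload_folder="./offload",
    offload_state_dict=True,
)
```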
diff --git a/src/diffusers/pipelines/stable_diffusion/__init__.py b/src/diffusers/pipelines/stable_diffusion/__init__.py
index 6bc2b58b5fef..b89dde319cb3 100644
--- a/src/diffusers/pipelines/stable_diffusion/__init__.py
+++ b/src/diffusers/pipelines/stable_diffusion/__init__.py
@@ -75,10 +75,12 @@ class StableDiffusionPipelineOutput(BaseOutput):
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepth2ImgPipeline,
+ StableDiffusionDiffEditPipeline,
StableDiffusionPix2PixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
+ from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py
new file mode 100644
index 000000000000..9bef5269fa07
--- /dev/null
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py
@@ -0,0 +1,1530 @@
+# Copyright 2023 DiffEdit Authors and Pix2Pix Zero Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from dataclasses import dataclass
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import numpy as np
+import PIL
+import torch
+from packaging import version
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+
+from ...configuration_utils import FrozenDict
+from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
+from ...models import AutoencoderKL, UNet2DConditionModel
+from ...schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers
+from ...utils import (
+ PIL_INTERPOLATION,
+ BaseOutput,
+ deprecate,
+ is_accelerate_available,
+ is_accelerate_version,
+ logging,
+ randn_tensor,
+ replace_example_docstring,
+)
+from ..pipeline_utils import DiffusionPipeline
+from . import StableDiffusionPipelineOutput
+from .safety_checker import StableDiffusionSafetyChecker
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+@dataclass
+class DiffEditInversionPipelineOutput(BaseOutput):
+ """
+ Output class for the inversion step of the Stable Diffusion DiffEdit pipeline.
+
+ Args:
+ latents (`torch.FloatTensor`)
+ inverted latents tensor
+ images (`List[PIL.Image.Image]` or `np.ndarray`)
+ List of denoised PIL images of length `num_timesteps * batch_size` or numpy array of shape `(num_timesteps,
+ batch_size, height, width, num_channels)`. The PIL images or numpy array represent the denoised images of the
+ diffusion pipeline.
+ """
+
+ latents: torch.FloatTensor
+ images: Union[List[PIL.Image.Image], np.ndarray]
+
+
+EXAMPLE_DOC_STRING = """
+
+ ```py
+ >>> import PIL
+ >>> import requests
+ >>> import torch
+ >>> from io import BytesIO
+
+ >>> from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionDiffEditPipeline
+
+
+ >>> def download_image(url):
+ ... response = requests.get(url)
+ ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
+
+
+ >>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
+
+ >>> init_image = download_image(img_url).resize((768, 768))
+
+ >>> pipe = StableDiffusionDiffEditPipeline.from_pretrained(
+ ... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
+ ... )
+ >>> pipe = pipe.to("cuda")
+
+ >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+ >>> pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
+ >>> pipe.enable_model_cpu_offload()
+
+ >>> mask_prompt = "A bowl of fruits"
+ >>> prompt = "A bowl of pears"
+
+ >>> mask_image = pipe.generate_mask(image=init_image, source_prompt=prompt, target_prompt=mask_prompt)
+ >>> image_latents = pipe.invert(image=init_image, prompt=mask_prompt).latents
+ >>> image = pipe(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[0]
+ ```
+"""
+
+EXAMPLE_INVERT_DOC_STRING = """
+ ```py
+ >>> import PIL
+ >>> import requests
+ >>> import torch
+ >>> from io import BytesIO
+
+ >>> from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionDiffEditPipeline
+
+
+ >>> def download_image(url):
+ ... response = requests.get(url)
+ ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
+
+
+ >>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
+
+ >>> init_image = download_image(img_url).resize((768, 768))
+
+ >>> pipe = StableDiffusionDiffEditPipeline.from_pretrained(
+ ... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
+ ... )
+ >>> pipe = pipe.to("cuda")
+
+ >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+ >>> pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
+ >>> pipe.enable_model_cpu_offload()
+
+ >>> prompt = "A bowl of fruits"
+
+ >>> inverted_latents = pipe.invert(image=init_image, prompt=prompt).latents
+ ```
+"""
+
+
+def auto_corr_loss(hidden_states, generator=None):
+ reg_loss = 0.0
+ for i in range(hidden_states.shape[0]):
+ for j in range(hidden_states.shape[1]):
+ noise = hidden_states[i : i + 1, j : j + 1, :, :]
+ while True:
+ roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item()
+ reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2
+ reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2
+
+ if noise.shape[2] <= 8:
+ break
+ noise = torch.nn.functional.avg_pool2d(noise, kernel_size=2)
+ return reg_loss
+
+
+def kl_divergence(hidden_states):
+ return hidden_states.var() + hidden_states.mean() ** 2 - 1 - torch.log(hidden_states.var() + 1e-7)
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
+def preprocess(image):
+ if isinstance(image, torch.Tensor):
+ return image
+ elif isinstance(image, PIL.Image.Image):
+ image = [image]
+
+ if isinstance(image[0], PIL.Image.Image):
+ w, h = image[0].size
+ w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
+
+ image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
+ image = np.concatenate(image, axis=0)
+ image = np.array(image).astype(np.float32) / 255.0
+ image = image.transpose(0, 3, 1, 2)
+ image = 2.0 * image - 1.0
+ image = torch.from_numpy(image)
+ elif isinstance(image[0], torch.Tensor):
+ image = torch.cat(image, dim=0)
+ return image
+
+
+def preprocess_mask(mask, batch_size: int = 1):
+ if not isinstance(mask, torch.Tensor):
+ # preprocess mask
+ if isinstance(mask, PIL.Image.Image) or isinstance(mask, np.ndarray):
+ mask = [mask]
+
+ if isinstance(mask, list):
+ if isinstance(mask[0], PIL.Image.Image):
+ mask = [np.array(m.convert("L")).astype(np.float32) / 255.0 for m in mask]
+ if isinstance(mask[0], np.ndarray):
+ mask = np.stack(mask, axis=0) if mask[0].ndim < 3 else np.concatenate(mask, axis=0)
+ mask = torch.from_numpy(mask)
+ elif isinstance(mask[0], torch.Tensor):
+ mask = torch.stack(mask, dim=0) if mask[0].ndim < 3 else torch.cat(mask, dim=0)
+
+ # Batch and add channel dim for single mask
+ if mask.ndim == 2:
+ mask = mask.unsqueeze(0).unsqueeze(0)
+
+ # Batch single mask or add channel dim
+ if mask.ndim == 3:
+ # Single batched mask, no channel dim or single mask not batched but channel dim
+ if mask.shape[0] == 1:
+ mask = mask.unsqueeze(0)
+
+ # Batched masks no channel dim
+ else:
+ mask = mask.unsqueeze(1)
+
+ # Check mask shape
+ if batch_size > 1:
+ if mask.shape[0] == 1:
+ mask = torch.cat([mask] * batch_size)
+ elif mask.shape[0] > 1 and mask.shape[0] != batch_size:
+ raise ValueError(
+ f"`mask_image` with batch size {mask.shape[0]} cannot be broadcasted to batch size {batch_size} "
+ f"inferred by prompt inputs"
+ )
+
+ if mask.shape[1] != 1:
+ raise ValueError(f"`mask_image` must have 1 channel, but has {mask.shape[1]} channels")
+
+ # Check mask is in [0, 1]
+ if mask.min() < 0 or mask.max() > 1:
+ raise ValueError("`mask_image` should be in [0, 1] range")
+
+ # Binarize mask
+ mask[mask < 0.5] = 0
+ mask[mask >= 0.5] = 1
+
+ return mask
+
+
+class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
+ r"""
+ Pipeline for text-guided semantic image editing using Stable Diffusion with DiffEdit. *This is an experimental feature*.
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ In addition the pipeline inherits the following loading methods:
+ - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
+ - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
+
+ as well as the following saving methods:
+ - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
+
+ Args:
+ vae ([`AutoencoderKL`]):
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+ text_encoder ([`CLIPTextModel`]):
+ Frozen text-encoder. Stable Diffusion uses the text portion of
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+ tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
+ scheduler ([`SchedulerMixin`]):
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents.
+ inverse_scheduler ([`DDIMInverseScheduler`]):
+ A scheduler to be used in combination with `unet` to fill in the unmasked part of the input latents.
+ safety_checker ([`StableDiffusionSafetyChecker`]):
+ Classification module that estimates whether generated images could be considered offensive or harmful.
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
+ feature_extractor ([`CLIPImageProcessor`]):
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
+ """
+ _optional_components = ["safety_checker", "feature_extractor", "inverse_scheduler"]
+
+ def __init__(
+ self,
+ vae: AutoencoderKL,
+ text_encoder: CLIPTextModel,
+ tokenizer: CLIPTokenizer,
+ unet: UNet2DConditionModel,
+ scheduler: KarrasDiffusionSchedulers,
+ safety_checker: StableDiffusionSafetyChecker,
+ feature_extractor: CLIPImageProcessor,
+ inverse_scheduler: DDIMInverseScheduler,
+ requires_safety_checker: bool = True,
+ ):
+ super().__init__()
+
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
+ deprecation_message = (
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
+ " file"
+ )
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
+ new_config = dict(scheduler.config)
+ new_config["steps_offset"] = 1
+ scheduler._internal_dict = FrozenDict(new_config)
+
+ if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
+ deprecation_message = (
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration"
+ " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
+ " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
+ " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
+ " Hub, it would be very nice if you could open a Pull request for the"
+ " `scheduler/scheduler_config.json` file"
+ )
+ deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
+ new_config = dict(scheduler.config)
+ new_config["skip_prk_steps"] = True
+ scheduler._internal_dict = FrozenDict(new_config)
+
+ if safety_checker is None and requires_safety_checker:
+ logger.warning(
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+ )
+
+ if safety_checker is not None and feature_extractor is None:
+ raise ValueError(
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+ )
+
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
+ version.parse(unet.config._diffusers_version).base_version
+ ) < version.parse("0.9.0.dev0")
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
+ deprecation_message = (
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
+ " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
+ " the `unet/config.json` file"
+ )
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
+ new_config = dict(unet.config)
+ new_config["sample_size"] = 64
+ unet._internal_dict = FrozenDict(new_config)
+
+ self.register_modules(
+ vae=vae,
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=scheduler,
+ safety_checker=safety_checker,
+ feature_extractor=feature_extractor,
+ inverse_scheduler=inverse_scheduler,
+ )
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
+ def enable_vae_slicing(self):
+ r"""
+ Enable sliced VAE decoding.
+
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
+ steps. This is useful to save some memory and allow larger batch sizes.
+ """
+ self.vae.enable_slicing()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
+ def disable_vae_slicing(self):
+ r"""
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
+ computing decoding in one step.
+ """
+ self.vae.disable_slicing()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
+ def enable_vae_tiling(self):
+ r"""
+ Enable tiled VAE decoding.
+
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
+ several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
+ """
+ self.vae.enable_tiling()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
+ def disable_vae_tiling(self):
+ r"""
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
+ computing decoding in one step.
+ """
+ self.vae.disable_tiling()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
+ def enable_sequential_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
+ `enable_model_cpu_offload`, but performance is lower.
+ """
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
+ from accelerate import cpu_offload
+ else:
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ if self.device.type != "cpu":
+ self.to("cpu", silence_dtype_warnings=True)
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
+
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
+ cpu_offload(cpu_offloaded_model, device)
+
+ if self.safety_checker is not None:
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
+ from accelerate import cpu_offload_with_hook
+ else:
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ if self.device.type != "cpu":
+ self.to("cpu", silence_dtype_warnings=True)
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
+
+ hook = None
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
+
+ if self.safety_checker is not None:
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
+
+ # We'll offload the last model manually.
+ self.final_offload_hook = hook
+
+ @property
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
+ def _execution_device(self):
+ r"""
+ Returns the device on which the pipeline's models will be executed. After calling
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
+ hooks.
+ """
+ if not hasattr(self.unet, "_hf_hook"):
+ return self.device
+ for module in self.unet.modules():
+ if (
+ hasattr(module, "_hf_hook")
+ and hasattr(module._hf_hook, "execution_device")
+ and module._hf_hook.execution_device is not None
+ ):
+ return torch.device(module._hf_hook.execution_device)
+ return self.device
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
+ def _encode_prompt(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ ):
+ r"""
+ Encodes the prompt into text encoder hidden states.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ prompt to be encoded
+ device: (`torch.device`):
+ torch device
+ num_images_per_prompt (`int`):
+ number of images that should be generated per prompt
+ do_classifier_free_guidance (`bool`):
+ whether to use classifier free guidance or not
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ """
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ if prompt_embeds is None:
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(self, TextualInversionLoaderMixin):
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
+ text_inputs = self.tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=self.tokenizer.model_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ text_input_ids = text_inputs.input_ids
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
+ text_input_ids, untruncated_ids
+ ):
+ removed_text = self.tokenizer.batch_decode(
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
+ )
+ logger.warning(
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
+ )
+
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+ attention_mask = text_inputs.attention_mask.to(device)
+ else:
+ attention_mask = None
+
+ prompt_embeds = self.text_encoder(
+ text_input_ids.to(device),
+ attention_mask=attention_mask,
+ )
+ prompt_embeds = prompt_embeds[0]
+
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+ bs_embed, seq_len, _ = prompt_embeds.shape
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+ # get unconditional embeddings for classifier free guidance
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ uncond_tokens: List[str]
+ if negative_prompt is None:
+ uncond_tokens = [""] * batch_size
+ elif type(prompt) is not type(negative_prompt):
+ raise TypeError(
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+ f" {type(prompt)}."
+ )
+ elif isinstance(negative_prompt, str):
+ uncond_tokens = [negative_prompt]
+ elif batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+ else:
+ uncond_tokens = negative_prompt
+
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(self, TextualInversionLoaderMixin):
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
+
+ max_length = prompt_embeds.shape[1]
+ uncond_input = self.tokenizer(
+ uncond_tokens,
+ padding="max_length",
+ max_length=max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+ attention_mask = uncond_input.attention_mask.to(device)
+ else:
+ attention_mask = None
+
+ negative_prompt_embeds = self.text_encoder(
+ uncond_input.input_ids.to(device),
+ attention_mask=attention_mask,
+ )
+ negative_prompt_embeds = negative_prompt_embeds[0]
+
+ if do_classifier_free_guidance:
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+ seq_len = negative_prompt_embeds.shape[1]
+
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ return prompt_embeds
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
+ def run_safety_checker(self, image, device, dtype):
+ if self.safety_checker is not None:
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
+ image, has_nsfw_concept = self.safety_checker(
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+ )
+ else:
+ has_nsfw_concept = None
+ return image, has_nsfw_concept
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+ def prepare_extra_step_kwargs(self, generator, eta):
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+ # and should be between [0, 1]
+
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ extra_step_kwargs = {}
+ if accepts_eta:
+ extra_step_kwargs["eta"] = eta
+
+ # check if the scheduler accepts generator
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ if accepts_generator:
+ extra_step_kwargs["generator"] = generator
+ return extra_step_kwargs
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
+ def decode_latents(self, latents):
+ latents = 1 / self.vae.config.scaling_factor * latents
+ image = self.vae.decode(latents).sample
+ image = (image / 2 + 0.5).clamp(0, 1)
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+ return image
+
+ def check_inputs(
+ self,
+ prompt,
+ strength,
+ callback_steps,
+ negative_prompt=None,
+ prompt_embeds=None,
+ negative_prompt_embeds=None,
+ ):
+ if (strength is None) or (strength is not None and (strength < 0 or strength > 1)):
+ raise ValueError(
+ f"The value of `strength` should in [0.0, 1.0] but is, but is {strength} of type {type(strength)}."
+ )
+
+ if (callback_steps is None) or (
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
+ ):
+ raise ValueError(
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+ f" {type(callback_steps)}."
+ )
+
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+ )
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+ if negative_prompt is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
+ raise ValueError(
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+ f" {negative_prompt_embeds.shape}."
+ )
+
+ def check_source_inputs(
+ self,
+ source_prompt=None,
+ source_negative_prompt=None,
+ source_prompt_embeds=None,
+ source_negative_prompt_embeds=None,
+ ):
+ if source_prompt is not None and source_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `source_prompt`: {source_prompt} and `source_prompt_embeds`: {source_prompt_embeds}."
+ " Please make sure to only forward one of the two."
+ )
+ elif source_prompt is None and source_prompt_embeds is None:
+ raise ValueError(
+ "Provide either `source_image` or `source_prompt_embeds`. Cannot leave all both of the arguments undefined."
+ )
+ elif source_prompt is not None and (
+ not isinstance(source_prompt, str) and not isinstance(source_prompt, list)
+ ):
+ raise ValueError(f"`source_prompt` has to be of type `str` or `list` but is {type(source_prompt)}")
+
+ if source_negative_prompt is not None and source_negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `source_negative_prompt`: {source_negative_prompt} and `source_negative_prompt_embeds`:"
+ f" {source_negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+
+ if source_prompt_embeds is not None and source_negative_prompt_embeds is not None:
+ if source_prompt_embeds.shape != source_negative_prompt_embeds.shape:
+ raise ValueError(
+ "`source_prompt_embeds` and `source_negative_prompt_embeds` must have the same shape when passed"
+ f" directly, but got: `source_prompt_embeds` {source_prompt_embeds.shape} !="
+ f" `source_negative_prompt_embeds` {source_negative_prompt_embeds.shape}."
+ )
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
+ def get_timesteps(self, num_inference_steps, strength, device):
+ # get the original timestep using init_timestep
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
+
+ t_start = max(num_inference_steps - init_timestep, 0)
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+
+ return timesteps, num_inference_steps - t_start
+
+ def get_inverse_timesteps(self, num_inference_steps, strength, device):
+ # get the original timestep using init_timestep
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
+
+ t_start = max(num_inference_steps - init_timestep, 0)
+
+ # safety for t_start overflow to prevent empty timesteps slice
+ if t_start == 0:
+ return self.inverse_scheduler.timesteps, num_inference_steps
+ timesteps = self.inverse_scheduler.timesteps[:-t_start]
+
+ return timesteps, num_inference_steps - t_start
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ if latents is None:
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+ else:
+ latents = latents.to(device)
+
+ # scale the initial noise by the standard deviation required by the scheduler
+ latents = latents * self.scheduler.init_noise_sigma
+ return latents
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_pix2pix_zero.StableDiffusionPix2PixZeroPipeline.prepare_image_latents
+ def prepare_image_latents(self, image, batch_size, dtype, device, generator=None):
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
+ raise ValueError(
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
+ )
+
+ image = image.to(device=device, dtype=dtype)
+
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ if isinstance(generator, list):
+ latents = [self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)]
+ latents = torch.cat(latents, dim=0)
+ else:
+ latents = self.vae.encode(image).latent_dist.sample(generator)
+
+ latents = self.vae.config.scaling_factor * latents
+
+ if batch_size != latents.shape[0]:
+ if batch_size % latents.shape[0] == 0:
+ # expand image_latents for batch_size
+ deprecation_message = (
+ f"You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial"
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
+ " your script to pass as many initial images as text prompts to suppress this warning."
+ )
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
+ additional_latents_per_image = batch_size // latents.shape[0]
+ latents = torch.cat([latents] * additional_latents_per_image, dim=0)
+ else:
+ raise ValueError(
+ f"Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts."
+ )
+ else:
+ latents = torch.cat([latents], dim=0)
+
+ return latents
+
+ def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int):
+ pred_type = self.inverse_scheduler.config.prediction_type
+ alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep]
+
+ beta_prod_t = 1 - alpha_prod_t
+
+ if pred_type == "epsilon":
+ return model_output
+ elif pred_type == "sample":
+ return (sample - alpha_prod_t ** (0.5) * model_output) / beta_prod_t ** (0.5)
+ elif pred_type == "v_prediction":
+ return (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
+ else:
+ raise ValueError(
+ f"prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`"
+ )
+
+ @torch.no_grad()
+ def generate_mask(
+ self,
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
+ target_prompt: Optional[Union[str, List[str]]] = None,
+ target_negative_prompt: Optional[Union[str, List[str]]] = None,
+ target_prompt_embeds: Optional[torch.FloatTensor] = None,
+ target_negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ source_prompt: Optional[Union[str, List[str]]] = None,
+ source_negative_prompt: Optional[Union[str, List[str]]] = None,
+ source_prompt_embeds: Optional[torch.FloatTensor] = None,
+ source_negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ num_maps_per_mask: Optional[int] = 10,
+ mask_encode_strength: Optional[float] = 0.5,
+ mask_thresholding_ratio: Optional[float] = 3.0,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ output_type: Optional[str] = "np",
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ ):
+ r"""
+ Function used to generate a latent mask given a mask prompt, a target prompt, and an image.
+
+ Args:
+ image (`PIL.Image.Image`):
+ `Image`, or tensor representing an image batch which will be used for computing the mask.
+ target_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide the semantic mask generation. If not defined, one has to pass
+ `target_prompt_embeds` instead.
+ target_negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the semantic mask generation. If not defined, one has to pass
+ `target_negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
+ `guidance_scale` is less than `1`).
+ target_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from the `target_prompt` input argument.
+ target_negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative text embeddings will be generated from the `target_negative_prompt`
+ input argument.
+ source_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide the semantic mask generation using the method in [DiffEdit:
+ Diffusion-Based Semantic Image Editing with Mask Guidance](https://arxiv.org/pdf/2210.11427.pdf). If
+ not defined, one has to pass `source_prompt_embeds` instead.
+ source_negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to negatively guide the semantic mask generation using the method in [DiffEdit:
+ Diffusion-Based Semantic Image Editing with Mask Guidance](https://arxiv.org/pdf/2210.11427.pdf). If
+ not defined, one has to pass `source_negative_prompt_embeds` instead.
+ source_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings to guide the semantic mask generation. Can be used to easily tweak text
+ inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from
+ `source_prompt` input argument.
+ source_negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings to negatively guide the semantic mask generation. Can be used to easily
+ tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from
+ `source_negative_prompt` input argument.
+ num_maps_per_mask (`int`, *optional*, defaults to 10):
+ The number of noise maps sampled to generate the semantic mask using the method in [DiffEdit:
+ Diffusion-Based Semantic Image Editing with Mask Guidance](https://arxiv.org/pdf/2210.11427.pdf).
+ mask_encode_strength (`float`, *optional*, defaults to 0.5):
+ Conceptually, the strength of the noise maps sampled to generate the semantic mask using the method in
+ [DiffEdit: Diffusion-Based Semantic Image Editing with Mask Guidance](
+ https://arxiv.org/pdf/2210.11427.pdf). Must be between 0 and 1.
+ mask_thresholding_ratio (`float`, *optional*, defaults to 3.0):
+ The maximum multiple of the mean absolute difference used to clamp the semantic guidance map before
+ mask binarization.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generate image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
+
+ Examples:
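+
+ A minimal usage sketch; the checkpoint, image, and prompts below follow this pipeline's
+ integration test and are illustrative choices rather than requirements:
+
+ ```py
+ >>> import torch
+ >>> from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
+ >>> from diffusers.utils import load_image
+
+ >>> pipe = StableDiffusionDiffEditPipeline.from_pretrained(
+ ...     "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
+ ... )
+ >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+ >>> pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
+ >>> pipe.enable_model_cpu_offload()
+
+ >>> img_url = (
+ ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
+ ... )
+ >>> init_image = load_image(img_url).convert("RGB").resize((768, 768))
+
+ >>> mask_image = pipe.generate_mask(
+ ...     image=init_image,
+ ...     source_prompt="a bowl of fruit",
+ ...     target_prompt="a bowl of pears",
+ ... )
+ ```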
+
+ Returns:
+ `List[PIL.Image.Image]` or `np.array`: `List[PIL.Image.Image]` if `output_type` is `"pil"`, otherwise a
+ `np.array`. When returning a `List[PIL.Image.Image]`, the list will consist of a batch of single-channel
+ binary images with dimensions `(height // self.vae_scale_factor, width // self.vae_scale_factor)`, otherwise
+ the `np.array` will have shape `(batch_size, height // self.vae_scale_factor, width //
+ self.vae_scale_factor)`.
+ """
+
+ # 1. Check inputs (Provide dummy argument for callback_steps)
+ self.check_inputs(
+ target_prompt,
+ mask_encode_strength,
+ 1,
+ target_negative_prompt,
+ target_prompt_embeds,
+ target_negative_prompt_embeds,
+ )
+
+ self.check_source_inputs(
+ source_prompt,
+ source_negative_prompt,
+ source_prompt_embeds,
+ source_negative_prompt_embeds,
+ )
+
+ if num_maps_per_mask is None or not isinstance(num_maps_per_mask, int) or num_maps_per_mask <= 0:
+ raise ValueError(
+ f"`num_maps_per_mask` has to be a positive integer but is {num_maps_per_mask} of type"
+ f" {type(num_maps_per_mask)}."
+ )
+
+ if mask_thresholding_ratio is None or mask_thresholding_ratio <= 0:
+ raise ValueError(
+ f"`mask_thresholding_ratio` has to be positive but is {mask_thresholding_ratio} of type"
+ f" {type(mask_thresholding_ratio)}."
+ )
+
+ # 2. Define call parameters
+ if target_prompt is not None and isinstance(target_prompt, str):
+ batch_size = 1
+ elif target_prompt is not None and isinstance(target_prompt, list):
+ batch_size = len(target_prompt)
+ else:
+ batch_size = target_prompt_embeds.shape[0]
+ if cross_attention_kwargs is None:
+ cross_attention_kwargs = {}
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ # 3. Encode input prompts
+ target_prompt_embeds = self._encode_prompt(
+ target_prompt,
+ device,
+ num_maps_per_mask,
+ do_classifier_free_guidance,
+ target_negative_prompt,
+ prompt_embeds=target_prompt_embeds,
+ negative_prompt_embeds=target_negative_prompt_embeds,
+ )
+
+ source_prompt_embeds = self._encode_prompt(
+ source_prompt,
+ device,
+ num_maps_per_mask,
+ do_classifier_free_guidance,
+ source_negative_prompt,
+ prompt_embeds=source_prompt_embeds,
+ negative_prompt_embeds=source_negative_prompt_embeds,
+ )
+
+ # 4. Preprocess image
+ image = preprocess(image).repeat_interleave(num_maps_per_mask, dim=0)
+
+ # 5. Set timesteps
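+ # `mask_encode_strength` determines how far along the noise schedule the image is pushed before
+ # the source- and target-conditioned noise predictions are compared; only the first timestep
+ # returned for that strength is used below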
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps, _ = self.get_timesteps(num_inference_steps, mask_encode_strength, device)
+ encode_timestep = timesteps[0]
+
+ # 6. Prepare image latents and add noise with specified strength
+ image_latents = self.prepare_image_latents(
+ image, batch_size * num_maps_per_mask, self.vae.dtype, device, generator
+ )
+ noise = randn_tensor(image_latents.shape, generator=generator, device=device, dtype=self.vae.dtype)
+ image_latents = self.scheduler.add_noise(image_latents, noise, encode_timestep)
+
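+ # Duplicate the noised latents so a single UNet forward pass covers every conditioning: with
+ # classifier-free guidance the order is [source negative, source, unconditional, target],
+ # matching the prompt_embeds concatenation below and the chunk(4) applied to the prediction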
+ latent_model_input = torch.cat([image_latents] * (4 if do_classifier_free_guidance else 2))
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, encode_timestep)
+
+ # 7. Predict the noise residual
+ prompt_embeds = torch.cat([source_prompt_embeds, target_prompt_embeds])
+ noise_pred = self.unet(
+ latent_model_input,
+ encode_timestep,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=cross_attention_kwargs,
+ ).sample
+
+ if do_classifier_free_guidance:
+ noise_pred_neg_src, noise_pred_source, noise_pred_uncond, noise_pred_target = noise_pred.chunk(4)
+ noise_pred_source = noise_pred_neg_src + guidance_scale * (noise_pred_source - noise_pred_neg_src)
+ noise_pred_target = noise_pred_uncond + guidance_scale * (noise_pred_target - noise_pred_uncond)
+ else:
+ noise_pred_source, noise_pred_target = noise_pred.chunk(2)
+
+ # 8. Compute the mask from the absolute difference of predicted noise residuals
+ # TODO: Consider smoothing mask guidance map
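+ # The guidance map is the absolute difference between target- and source-conditioned noise
+ # predictions, averaged over the sampled noise maps and the latent channels, then clamped at
+ # `mask_thresholding_ratio` times its mean and binarized at 0.5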
+ mask_guidance_map = (
+ torch.abs(noise_pred_target - noise_pred_source)
+ .reshape(batch_size, num_maps_per_mask, *noise_pred_target.shape[-3:])
+ .mean([1, 2])
+ )
+ clamp_magnitude = mask_guidance_map.mean() * mask_thresholding_ratio
+ semantic_mask_image = mask_guidance_map.clamp(0, clamp_magnitude) / clamp_magnitude
+ semantic_mask_image = torch.where(semantic_mask_image <= 0.5, 0, 1)
+ mask_image = semantic_mask_image.cpu().numpy()
+
+ # 9. Convert to Numpy array or PIL.
+ if output_type == "pil":
+ mask_image = self.numpy_to_pil(mask_image)
+
+ # Offload last model to CPU
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
+ return mask_image
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_INVERT_DOC_STRING)
+ def invert(
+ self,
+ prompt: Optional[Union[str, List[str]]] = None,
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
+ num_inference_steps: int = 50,
+ inpaint_strength: float = 0.8,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ decode_latents: bool = False,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: Optional[int] = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ lambda_auto_corr: float = 20.0,
+ lambda_kl: float = 20.0,
+ num_reg_steps: int = 0,
+ num_auto_corr_rolls: int = 5,
+ ):
+ r"""
+ Function used to generate inverted latents given a prompt and image.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+ instead.
+ image (`PIL.Image.Image`):
+ `Image`, or tensor representing an image batch to produce the inverted latents, guided by `prompt`.
+ inpaint_strength (`float`, *optional*, defaults to 0.8):
+ Conceptually, indicates how far into the noising process to run latent inversion. Must be between 0 and
+ 1. When `inpaint_strength` is 1, the inversion process will be run for the full number of iterations
+ specified in `num_inference_steps`. `image` will be used as a reference for the inversion process, adding
+ more noise the larger the `inpaint_strength`. If `inpaint_strength` is 0, no inversion will occur.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
+ is less than `1`).
+ generator (`torch.Generator`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ decode_latents (`bool`, *optional*, defaults to `False`):
+ Whether or not to decode the inverted latents into a generated image. Setting this argument to `True`
+ will decode all inverted latents for each timestep into a list of generated images.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.DiffEditInversionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
+ lambda_auto_corr (`float`, *optional*, defaults to 20.0):
+ Lambda parameter controlling the weight of the auto-correlation regularization.
+ lambda_kl (`float`, *optional*, defaults to 20.0):
+ Lambda parameter controlling the weight of the Kullback–Leibler divergence regularization.
+ num_reg_steps (`int`, *optional*, defaults to 0):
+ Number of regularization steps applied to each noise prediction during inversion.
+ num_auto_corr_rolls (`int`, *optional*, defaults to 5):
+ Number of times the auto-correlation loss is evaluated (with its gradient scaled by
+ `1 / num_auto_corr_rolls`) per regularization step.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] or
+ `tuple`: [`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`]
+ if `return_dict` is `True`, otherwise a `tuple`. When returning a tuple, the first element is the inverted
+ latents tensor, ordered from the most to the least noised along its second dimension, and the second is the
+ corresponding decoded images if `decode_latents` is `True`, otherwise `None`.
+ """
+
+ # 1. Check inputs
+ self.check_inputs(
+ prompt,
+ inpaint_strength,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ )
+
+ if image is None:
+ raise ValueError("`image` input cannot be undefined.")
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+ if cross_attention_kwargs is None:
+ cross_attention_kwargs = {}
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ # 3. Preprocess image
+ image = preprocess(image)
+
+ # 4. Prepare latent variables
+ num_images_per_prompt = 1
+ latents = self.prepare_image_latents(
+ image, batch_size * num_images_per_prompt, self.vae.dtype, device, generator
+ )
+
+ # 5. Encode input prompt
+ prompt_embeds = self._encode_prompt(
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ )
+
+ # 6. Prepare timesteps
+ self.inverse_scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps, num_inference_steps = self.get_inverse_timesteps(num_inference_steps, inpaint_strength, device)
+
+ # 7. Noising loop where we obtain the intermediate noised latent image for each timestep.
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order
+ inverted_latents = [latents.detach().clone()]
+ with self.progress_bar(total=num_inference_steps - 1) as progress_bar:
+ for i, t in enumerate(timesteps[:-1]):
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+ latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t)
+
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=cross_attention_kwargs,
+ ).sample
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # regularization of the noise prediction (not in original code or paper but borrowed from Pix2PixZero)
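+ # the auto-correlation loss discourages spatially correlated structure in the predicted noise,
+ # while the KL term keeps its statistics close to an IID standard normal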
+ if num_reg_steps > 0:
+ with torch.enable_grad():
+ for _ in range(num_reg_steps):
+ if lambda_auto_corr > 0:
+ for _ in range(num_auto_corr_rolls):
+ var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True)
+
+ # Derive epsilon from model output before regularizing to IID standard normal
+ var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t)
+
+ l_ac = auto_corr_loss(var_epsilon, generator=generator)
+ l_ac.backward()
+
+ grad = var.grad.detach() / num_auto_corr_rolls
+ noise_pred = noise_pred - lambda_auto_corr * grad
+
+ if lambda_kl > 0:
+ var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True)
+
+ # Derive epsilon from model output before regularizing to IID standard normal
+ var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t)
+
+ l_kld = kl_divergence(var_epsilon)
+ l_kld.backward()
+
+ grad = var.grad.detach()
+ noise_pred = noise_pred - lambda_kl * grad
+
+ noise_pred = noise_pred.detach()
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample
+ inverted_latents.append(latents.detach().clone())
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or (
+ (i + 1) > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0
+ ):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
+ assert len(inverted_latents) == len(timesteps)
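+ # stack the inversion trajectory along a new dimension, reversed so the most-noised latents come
+ # first; `__call__` starts denoising from index 0 and uses index i as the masked-region reference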
+ latents = torch.stack(list(reversed(inverted_latents)), 1)
+
+ # 8. Post-processing
+ image = None
+ if decode_latents:
+ image = self.decode_latents(latents.flatten(0, 1).detach())
+
+ # 9. Convert to PIL.
+ if decode_latents and output_type == "pil":
+ image = self.numpy_to_pil(image)
+
+ # Offload last model to CPU
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
+ if not return_dict:
+ return (latents, image)
+
+ return DiffEditInversionPipelineOutput(latents=latents, images=image)
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Optional[Union[str, List[str]]] = None,
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
+ image_latents: torch.FloatTensor = None,
+ inpaint_strength: Optional[float] = 0.8,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ ):
+ r"""
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+ instead.
+ mask_image (`PIL.Image.Image`):
+ `Image`, or tensor representing an image batch, to mask the generated image. White pixels in the mask
+ will be repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be
+ converted to a single channel (luminance) before use. If it's a tensor, it should contain one color
+ channel (L) instead of 3, so the expected shape would be `(B, 1, H, W)`.
+ image_latents (`PIL.Image.Image` or `torch.FloatTensor`):
+ Partially noised image latents from the inversion process to be used as inputs for image generation.
+ inpaint_strength (`float`, *optional*, defaults to 0.8):
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When
+ `inpaint_strength` is 1, the denoising process will be run on the masked area for the full number of
+ iterations specified in `num_inference_steps`. `image_latents` will be used as a reference for the masked
+ area, adding more noise to that region the larger the `inpaint_strength`. If `inpaint_strength` is 0, no
+ inpainting will occur.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
+ is less than `1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+ [`schedulers.DDIMScheduler`], will be ignored for others.
+ generator (`torch.Generator`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
+
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, otherwise a `tuple`.
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
+ (nsfw) content, according to the `safety_checker`.
+ """
+
+ # 1. Check inputs
+ self.check_inputs(
+ prompt,
+ inpaint_strength,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ )
+
+ if mask_image is None:
+ raise ValueError(
+ "`mask_image` input cannot be undefined. Use `generate_mask()` to compute `mask_image` from text prompts."
+ )
+ if image_latents is None:
+ raise ValueError(
+ "`image_latents` input cannot be undefined. Use `invert()` to compute `image_latents` from input images."
+ )
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+ if cross_attention_kwargs is None:
+ cross_attention_kwargs = {}
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ # 3. Encode input prompt
+ prompt_embeds = self._encode_prompt(
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ )
+
+ # 4. Preprocess mask
+ mask_image = preprocess_mask(mask_image, batch_size)
+ latent_height, latent_width = mask_image.shape[-2:]
+ mask_image = torch.cat([mask_image] * num_images_per_prompt)
+ mask_image = mask_image.to(device=device, dtype=prompt_embeds.dtype)
+
+ # 5. Set timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, inpaint_strength, device)
+
+ # 6. Preprocess image latents
+ image_latents = preprocess(image_latents)
+ latent_shape = (self.vae.config.latent_channels, latent_height, latent_width)
+ if image_latents.shape[-3:] != latent_shape:
+ raise ValueError(
+ f"Each latent image in `image_latents` must have shape {latent_shape}, "
+ f"but has shape {image_latents.shape[-3:]}"
+ )
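+ # `image_latents` may be passed flattened as (batch_size * num_timesteps, C, H, W); reshape it to
+ # (batch_size, num_timesteps, C, H, W), then move the timestep axis first so image_latents[i] holds
+ # the inverted latents used as the masked-region reference at denoising step i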
+ if image_latents.ndim == 4:
+ image_latents = image_latents.reshape(batch_size, len(timesteps), *latent_shape)
+ if image_latents.shape[:2] != (batch_size, len(timesteps)):
+ raise ValueError(
+ f"`image_latents` must have batch size {batch_size} with latent images from {len(timesteps)} timesteps, "
+ f"but has batch size {image_latents.shape[0]} with latent images from {image_latents.shape[1]} timesteps."
+ )
+ image_latents = image_latents.transpose(0, 1).repeat_interleave(num_images_per_prompt, dim=1)
+ image_latents = image_latents.to(device=device, dtype=prompt_embeds.dtype)
+
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 8. Denoising loop
+ latents = image_latents[0].detach().clone()
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=cross_attention_kwargs,
+ ).sample
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
+
+ # mask with inverted latents from appropriate timestep - use original image latent for last step
+ latents = latents * mask_image + image_latents[i] * (1 - mask_image)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
+ # 9. Post-processing
+ image = self.decode_latents(latents)
+
+ # 10. Run safety checker
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
+
+ # 11. Convert to PIL
+ if output_type == "pil":
+ image = self.numpy_to_pil(image)
+
+ # Offload last model to CPU
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
+ if not return_dict:
+ return (image, has_nsfw_concept)
+
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py
index bf4fe8d87ff9..f3708107e82a 100644
--- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py
+++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py
@@ -242,6 +242,21 @@ def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
+class StableDiffusionDiffEditPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class StableDiffusionImageVariationPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
diff --git a/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py b/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py
index 3b64ea2d2fc1..3ec6f681be79 100644
--- a/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py
+++ b/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py
@@ -34,6 +34,10 @@
MIDI_FILE = "./tests/fixtures/elise_format0.mid"
+# The note-seq package throws an error on import because the default installed version of IPython
+# is not compatible with Python 3.8, which we run in the CI.
+# https://github.com/huggingface/diffusers/actions/runs/4830121056/jobs/8605954838#step:7:98
+@unittest.skip("The note-seq package currently throws an error on import")
class SpectrogramDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = SpectrogramDiffusionPipeline
required_optional_params = PipelineTesterMixin.required_optional_params - {
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
new file mode 100644
index 000000000000..c20bc3b47d7b
--- /dev/null
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
@@ -0,0 +1,315 @@
+# coding=utf-8
+# Copyright 2023 HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import random
+import tempfile
+import unittest
+
+import numpy as np
+import torch
+from PIL import Image
+from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
+
+from diffusers import (
+ AutoencoderKL,
+ DDIMInverseScheduler,
+ DDIMScheduler,
+ StableDiffusionDiffEditPipeline,
+ UNet2DConditionModel,
+)
+from diffusers.utils import load_image, slow
+from diffusers.utils.testing_utils import floats_tensor, require_torch_gpu, torch_device
+
+from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
+
+
+torch.backends.cuda.matmul.allow_tf32 = False
+
+
+class StableDiffusionDiffEditPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
+ pipeline_class = StableDiffusionDiffEditPipeline
+ params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
+ batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
+
+ def get_dummy_components(self):
+ torch.manual_seed(0)
+ unet = UNet2DConditionModel(
+ block_out_channels=(32, 64),
+ layers_per_block=2,
+ sample_size=32,
+ in_channels=4,
+ out_channels=4,
+ down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
+ up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
+ cross_attention_dim=32,
+ # SD2-specific config below
+ attention_head_dim=(2, 4),
+ use_linear_projection=True,
+ )
+ scheduler = DDIMScheduler(
+ beta_start=0.00085,
+ beta_end=0.012,
+ beta_schedule="scaled_linear",
+ clip_sample=False,
+ set_alpha_to_one=False,
+ )
+ inverse_scheduler = DDIMInverseScheduler(
+ beta_start=0.00085,
+ beta_end=0.012,
+ beta_schedule="scaled_linear",
+ clip_sample=False,
+ set_alpha_to_zero=False,
+ )
+ torch.manual_seed(0)
+ vae = AutoencoderKL(
+ block_out_channels=[32, 64],
+ in_channels=3,
+ out_channels=3,
+ down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
+ up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
+ latent_channels=4,
+ sample_size=128,
+ )
+ torch.manual_seed(0)
+ text_encoder_config = CLIPTextConfig(
+ bos_token_id=0,
+ eos_token_id=2,
+ hidden_size=32,
+ intermediate_size=37,
+ layer_norm_eps=1e-05,
+ num_attention_heads=4,
+ num_hidden_layers=5,
+ pad_token_id=1,
+ vocab_size=1000,
+ # SD2-specific config below
+ hidden_act="gelu",
+ projection_dim=512,
+ )
+ text_encoder = CLIPTextModel(text_encoder_config)
+ tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
+
+ components = {
+ "unet": unet,
+ "scheduler": scheduler,
+ "inverse_scheduler": inverse_scheduler,
+ "vae": vae,
+ "text_encoder": text_encoder,
+ "tokenizer": tokenizer,
+ "safety_checker": None,
+ "feature_extractor": None,
+ }
+
+ return components
+
+ def get_dummy_inputs(self, device, seed=0):
+ mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
+ latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
+ if str(device).startswith("mps"):
+ generator = torch.manual_seed(seed)
+ else:
+ generator = torch.Generator(device=device).manual_seed(seed)
+ inputs = {
+ "prompt": "a dog and a newt",
+ "mask_image": mask,
+ "image_latents": latents,
+ "generator": generator,
+ "num_inference_steps": 2,
+ "inpaint_strength": 1.0,
+ "guidance_scale": 6.0,
+ "output_type": "numpy",
+ }
+
+ return inputs
+
+ def get_dummy_mask_inputs(self, device, seed=0):
+ image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
+ image = image.cpu().permute(0, 2, 3, 1)[0]
+ image = Image.fromarray(np.uint8(image)).convert("RGB")
+ if str(device).startswith("mps"):
+ generator = torch.manual_seed(seed)
+ else:
+ generator = torch.Generator(device=device).manual_seed(seed)
+ inputs = {
+ "image": image,
+ "source_prompt": "a cat and a frog",
+ "target_prompt": "a dog and a newt",
+ "generator": generator,
+ "num_inference_steps": 2,
+ "num_maps_per_mask": 2,
+ "mask_encode_strength": 1.0,
+ "guidance_scale": 6.0,
+ "output_type": "numpy",
+ }
+
+ return inputs
+
+ def get_dummy_inversion_inputs(self, device, seed=0):
+ image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
+ image = image.cpu().permute(0, 2, 3, 1)[0]
+ image = Image.fromarray(np.uint8(image)).convert("RGB")
+ if str(device).startswith("mps"):
+ generator = torch.manual_seed(seed)
+ else:
+ generator = torch.Generator(device=device).manual_seed(seed)
+ inputs = {
+ "image": image,
+ "prompt": "a cat and a frog",
+ "generator": generator,
+ "num_inference_steps": 2,
+ "inpaint_strength": 1.0,
+ "guidance_scale": 6.0,
+ "decode_latents": True,
+ "output_type": "numpy",
+ }
+ return inputs
+
+ def test_save_load_optional_components(self):
+ if not hasattr(self.pipeline_class, "_optional_components"):
+ return
+
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ pipe.to(torch_device)
+ pipe.set_progress_bar_config(disable=None)
+
+ # set all optional components to None and update pipeline config accordingly
+ for optional_component in pipe._optional_components:
+ setattr(pipe, optional_component, None)
+ pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
+
+ inputs = self.get_dummy_inputs(torch_device)
+ output = pipe(**inputs)[0]
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ pipe.save_pretrained(tmpdir)
+ pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
+ pipe_loaded.to(torch_device)
+ pipe_loaded.set_progress_bar_config(disable=None)
+
+ for optional_component in pipe._optional_components:
+ self.assertTrue(
+ getattr(pipe_loaded, optional_component) is None,
+ f"`{optional_component}` did not stay set to None after loading.",
+ )
+
+ inputs = self.get_dummy_inputs(torch_device)
+ output_loaded = pipe_loaded(**inputs)[0]
+
+ max_diff = np.abs(output - output_loaded).max()
+ self.assertLess(max_diff, 1e-4)
+
+ def test_mask(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ pipe.to(device)
+ pipe.set_progress_bar_config(disable=None)
+
+ inputs = self.get_dummy_mask_inputs(device)
+ mask = pipe.generate_mask(**inputs)
+ mask_slice = mask[0, -3:, -3:]
+
+ self.assertEqual(mask.shape, (1, 16, 16))
+ expected_slice = np.array([0] * 9)
+ max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
+ self.assertLessEqual(max_diff, 1e-3)
+ self.assertEqual(mask[0, -3, -4], 0)
+
+ def test_inversion(self):
+ device = "cpu"
+
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ pipe.to(device)
+ pipe.set_progress_bar_config(disable=None)
+
+ inputs = self.get_dummy_inversion_inputs(device)
+ image = pipe.invert(**inputs).images
+ image_slice = image[0, -1, -3:, -3:]
+
+ self.assertEqual(image.shape, (2, 32, 32, 3))
+ expected_slice = np.array(
+ [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
+ )
+ max_diff = np.abs(image_slice.flatten() - expected_slice).max()
+ self.assertLessEqual(max_diff, 1e-3)
+
+
+@require_torch_gpu
+@slow
+class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
+ def tearDown(self):
+ super().tearDown()
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ @classmethod
+ def setUpClass(cls):
+ raw_image = load_image(
+ "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
+ )
+
+ raw_image = raw_image.convert("RGB").resize((768, 768))
+
+ cls.raw_image = raw_image
+
+ def test_stable_diffusion_diffedit_full(self):
+ generator = torch.manual_seed(0)
+
+ pipe = StableDiffusionDiffEditPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
+ )
+ pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+ pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
+ pipe.enable_model_cpu_offload()
+ pipe.set_progress_bar_config(disable=None)
+
+ source_prompt = "a bowl of fruit"
+ target_prompt = "a bowl of pears"
+
+ mask_image = pipe.generate_mask(
+ image=self.raw_image,
+ source_prompt=source_prompt,
+ target_prompt=target_prompt,
+ generator=generator,
+ )
+
+ inv_latents = pipe.invert(
+ prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
+ ).latents
+
+ image = pipe(
+ prompt=target_prompt,
+ mask_image=mask_image,
+ image_latents=inv_latents,
+ generator=generator,
+ negative_prompt=source_prompt,
+ inpaint_strength=0.7,
+ output_type="numpy",
+ ).images[0]
+
+ expected_image = (
+ np.array(
+ load_image(
+ "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
+ "/diffedit/pears.png"
+ ).resize((768, 768))
+ )
+ / 255
+ )
+ assert np.abs(expected_image - image).max() < 5e-1