diff --git a/examples/community/checkpoint_merger.py b/examples/community/checkpoint_merger.py
index 9df5943a86b1..f702bf0cea9b 100644
--- a/examples/community/checkpoint_merger.py
+++ b/examples/community/checkpoint_merger.py
@@ -138,7 +138,6 @@ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]
             comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx])
             if not force and comparison_result is False:
                 raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.")
-        print(config_dicts[0], config_dicts[1])
         print("Compatible model_index.json files found")
         # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
         cached_folders = []
diff --git a/examples/community/latent_consistency_img2img.py b/examples/community/latent_consistency_img2img.py
index 98078a2eef96..3c5ffa845699 100644
--- a/examples/community/latent_consistency_img2img.py
+++ b/examples/community/latent_consistency_img2img.py
@@ -240,14 +240,6 @@ def prepare_latents(
         return latents
 
-        if latents is None:
-            latents = torch.randn(shape, dtype=dtype).to(device)
-        else:
-            latents = latents.to(device)
-        # scale the initial noise by the standard deviation required by the scheduler
-        latents = latents * self.scheduler.init_noise_sigma
-        return latents
-
     def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
         """
         see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
diff --git a/scripts/convert_zero123_to_diffusers.py b/scripts/convert_zero123_to_diffusers.py
index 669a4962be3c..b46633fae7ff 100644
--- a/scripts/convert_zero123_to_diffusers.py
+++ b/scripts/convert_zero123_to_diffusers.py
@@ -113,7 +113,7 @@ def create_unet_diffusers_config(original_config, image_size: int, controlnet=Fa
         assert "adm_in_channels" in unet_params
         projection_class_embeddings_input_dim = unet_params["adm_in_channels"]
     else:
-        raise NotImplementedError(f"Unknown conditional unet num_classes config: {unet_params["num_classes"]}")
+        raise NotImplementedError(f"Unknown conditional unet num_classes config: {unet_params['num_classes']}")
 
     config = {
         "sample_size": image_size // vae_scale_factor,
diff --git a/src/diffusers/image_processor.py b/src/diffusers/image_processor.py
index 4ccb9d77d627..0f4481570829 100644
--- a/src/diffusers/image_processor.py
+++ b/src/diffusers/image_processor.py
@@ -80,7 +80,6 @@ def __init__(
                 " if you intended to convert the image into RGB format, please set `do_convert_grayscale = False`.",
                 " if you intended to convert the image into grayscale format, please set `do_convert_rgb = False`",
             )
-            self.config.do_convert_rgb = False
 
     @staticmethod
     def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]: