diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index d58bbdac1867..965213f7cb8e 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -12,6 +12,7 @@ is_note_seq_available, is_onnx_available, is_scipy_available, + is_sentencepiece_available, is_torch_available, is_torchsde_available, is_transformers_available, @@ -246,8 +247,6 @@ "AuraFlowPipeline", "BlipDiffusionControlNetPipeline", "BlipDiffusionPipeline", - "ChatGLMModel", - "ChatGLMTokenizer", "CLIPImageProjection", "CycleDiffusionPipeline", "FluxPipeline", @@ -385,6 +384,19 @@ else: _import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"]) +try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_transformers_and_sentencepiece_objects"] = [ + name for name in dir(dummy_torch_and_transformers_and_sentencepiece_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["KolorsImg2ImgPipeline", "KolorsPipeline"]) + try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() @@ -669,8 +681,6 @@ AudioLDM2UNet2DConditionModel, AudioLDMPipeline, AuraFlowPipeline, - ChatGLMModel, - ChatGLMTokenizer, CLIPImageProjection, CycleDiffusionPipeline, FluxPipeline, @@ -703,8 +713,6 @@ KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline, - KolorsImg2ImgPipeline, - KolorsPipeline, LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline, LattePipeline, @@ -802,6 +810,13 @@ else: from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline + try: + if not (is_torch_available() and 
is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_and_sentencepiece_objects import * # noqa F403 + else: + from .pipelines import KolorsImg2ImgPipeline, KolorsPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 10f6c4a92054..a436f051f019 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -10,6 +10,7 @@ is_librosa_available, is_note_seq_available, is_onnx_available, + is_sentencepiece_available, is_torch_available, is_torch_npu_available, is_transformers_available, @@ -204,12 +205,6 @@ "Kandinsky3Img2ImgPipeline", "Kandinsky3Pipeline", ] - _import_structure["kolors"] = [ - "KolorsPipeline", - "KolorsImg2ImgPipeline", - "ChatGLMModel", - "ChatGLMTokenizer", - ] _import_structure["latent_consistency_models"] = [ "LatentConsistencyModelImg2ImgPipeline", "LatentConsistencyModelPipeline", @@ -349,6 +344,22 @@ "StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline", ] + +try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import ( + dummy_torch_and_transformers_and_sentencepiece_objects, + ) + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) +else: + _import_structure["kolors"] = [ + "KolorsPipeline", + "KolorsImg2ImgPipeline", + ] + try: if not is_flax_available(): raise OptionalDependencyNotAvailable() @@ -506,12 +517,6 @@ Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline, ) - from .kolors import ( - ChatGLMModel, - ChatGLMTokenizer, - KolorsImg2ImgPipeline, - KolorsPipeline, - ) from 
.latent_consistency_models import ( LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline, @@ -640,6 +645,17 @@ StableDiffusionXLKDiffusionPipeline, ) + try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_sentencepiece_objects import * + else: + from .kolors import ( + KolorsImg2ImgPipeline, + KolorsPipeline, + ) + try: if not is_flax_available(): raise OptionalDependencyNotAvailable() diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index 854cfaa47b7a..c91862e1c80d 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -18,6 +18,7 @@ from huggingface_hub.utils import validate_hf_hub_args from ..configuration_utils import ConfigMixin +from ..utils import is_sentencepiece_available from .aura_flow import AuraFlowPipeline from .controlnet import ( StableDiffusionControlNetImg2ImgPipeline, @@ -47,7 +48,6 @@ KandinskyV22Pipeline, ) from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline -from .kolors import KolorsImg2ImgPipeline, KolorsPipeline from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline from .pag import ( PixArtSigmaPAGPipeline, @@ -101,7 +101,6 @@ ("stable-diffusion-xl-controlnet-pag", StableDiffusionXLControlNetPAGPipeline), ("pixart-sigma-pag", PixArtSigmaPAGPipeline), ("auraflow", AuraFlowPipeline), - ("kolors", KolorsPipeline), ("flux", FluxPipeline), ] ) @@ -119,7 +118,6 @@ ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetImg2ImgPipeline), ("stable-diffusion-xl-pag", StableDiffusionXLPAGImg2ImgPipeline), ("lcm", LatentConsistencyModelImg2ImgPipeline), - ("kolors", KolorsImg2ImgPipeline), ] ) @@ -158,6 +156,12 @@ ] ) +if is_sentencepiece_available(): + from .kolors import 
KolorsImg2ImgPipeline, KolorsPipeline + + AUTO_TEXT2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsPipeline + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsImg2ImgPipeline + SUPPORTED_TASKS_MAPPINGS = [ AUTO_TEXT2IMAGE_PIPELINES_MAPPING, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, diff --git a/src/diffusers/pipelines/kolors/__init__.py b/src/diffusers/pipelines/kolors/__init__.py index 843ee93c257f..671d22e9f433 100644 --- a/src/diffusers/pipelines/kolors/__init__.py +++ b/src/diffusers/pipelines/kolors/__init__.py @@ -5,6 +5,7 @@ OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, + is_sentencepiece_available, is_torch_available, is_transformers_available, ) @@ -14,12 +15,12 @@ _import_structure = {} try: - if not (is_transformers_available() and is_torch_available()): + if not (is_transformers_available() and is_torch_available() and is_sentencepiece_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ...utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403 - _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) else: _import_structure["pipeline_kolors"] = ["KolorsPipeline"] _import_structure["pipeline_kolors_img2img"] = ["KolorsImg2ImgPipeline"] @@ -28,10 +29,10 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: - if not (is_transformers_available() and is_torch_available()): + if not (is_transformers_available() and is_torch_available() and is_sentencepiece_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ...utils.dummy_torch_and_transformers_and_sentencepiece_objects import * else: from .pipeline_kolors import KolorsPipeline diff --git a/src/diffusers/utils/__init__.py 
b/src/diffusers/utils/__init__.py index d2633d2ec9e7..c7ea2bcc5b7f 100644 --- a/src/diffusers/utils/__init__.py +++ b/src/diffusers/utils/__init__.py @@ -78,6 +78,7 @@ is_peft_version, is_safetensors_available, is_scipy_available, + is_sentencepiece_available, is_tensorboard_available, is_timm_available, is_torch_available, diff --git a/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py new file mode 100644 index 000000000000..a70d003f7fc6 --- /dev/null +++ b/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py @@ -0,0 +1,32 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class KolorsImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + +class KolorsPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 3e9a33503906..840f16d54800 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ 
b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -242,36 +242,6 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class ChatGLMModel(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - -class ChatGLMTokenizer(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - class CLIPImageProjection(metaclass=DummyObject): _backends = ["torch", "transformers"] @@ -752,36 +722,6 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class KolorsImg2ImgPipeline(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - -class KolorsPipeline(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", 
"transformers"]) - - class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] diff --git a/src/diffusers/utils/import_utils.py b/src/diffusers/utils/import_utils.py index 44477df2e220..09cb715a6068 100644 --- a/src/diffusers/utils/import_utils.py +++ b/src/diffusers/utils/import_utils.py @@ -294,6 +294,13 @@ except importlib_metadata.PackageNotFoundError: _torchvision_available = False +_sentencepiece_available = importlib.util.find_spec("sentencepiece") is not None +try: + _sentencepiece_version = importlib_metadata.version("sentencepiece") + logger.info(f"Successfully imported sentencepiece version {_sentencepiece_version}") +except importlib_metadata.PackageNotFoundError: + _sentencepiece_available = False + _matplotlib_available = importlib.util.find_spec("matplotlib") is not None try: _matplotlib_version = importlib_metadata.version("matplotlib") @@ -436,6 +443,10 @@ def is_google_colab(): return _is_google_colab +def is_sentencepiece_available(): + return _sentencepiece_available + + # docstyle-ignore FLAX_IMPORT_ERROR = """ {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the @@ -553,6 +564,12 @@ def is_google_colab(): {0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors` """ +# docstyle-ignore +SENTENCEPIECE_IMPORT_ERROR = """ +{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece` +""" + + # docstyle-ignore BITSANDBYTES_IMPORT_ERROR = """ {0} requires the bitsandbytes library but it was not found in your environment. 
You can install it with pip: `pip install bitsandbytes` @@ -581,6 +598,7 @@ def is_google_colab(): ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)), ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)), + ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ] ) diff --git a/tests/pipelines/kolors/test_kolors.py b/tests/pipelines/kolors/test_kolors.py index 3f7fcaf59575..719a5ef101e7 100644 --- a/tests/pipelines/kolors/test_kolors.py +++ b/tests/pipelines/kolors/test_kolors.py @@ -20,12 +20,11 @@ from diffusers import ( AutoencoderKL, - ChatGLMModel, - ChatGLMTokenizer, EulerDiscreteScheduler, KolorsPipeline, UNet2DConditionModel, ) +from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import (