diff --git a/src/diffusers/modeling_flax_utils.py b/src/diffusers/modeling_flax_utils.py
index b6448447bacc..7f1d65e2edc0 100644
--- a/src/diffusers/modeling_flax_utils.py
+++ b/src/diffusers/modeling_flax_utils.py
@@ -436,7 +436,7 @@ def from_pretrained(
             )
             cls._missing_keys = missing_keys
 
-        # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not
+        # Mismatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not
         # matching the weights in the model.
         mismatched_keys = []
         for key in state.keys():
diff --git a/src/diffusers/pipeline_flax_utils.py b/src/diffusers/pipeline_flax_utils.py
index 6cfd7ae32112..9ea94ee0f2e1 100644
--- a/src/diffusers/pipeline_flax_utils.py
+++ b/src/diffusers/pipeline_flax_utils.py
@@ -244,8 +244,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
             kwargs (remaining dictionary of keyword arguments, *optional*):
                 Can be used to overwrite load - and saveable variables - *i.e.* the pipeline components of the
-                specific pipeline class. The overritten components are then directly passed to the pipelines `__init__`
-                method. See example below for more information.
+                specific pipeline class. The overwritten components are then directly passed to the pipelines
+                `__init__` method. See example below for more information.
diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py
index 566696b90b34..fb8801bc959a 100644
--- a/src/diffusers/pipeline_utils.py
+++ b/src/diffusers/pipeline_utils.py
@@ -235,8 +235,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
             kwargs (remaining dictionary of keyword arguments, *optional*):
                 Can be used to overwrite load - and saveable variables - *i.e.* the pipeline components of the
-                specific pipeline class. The overritten components are then directly passed to the pipelines `__init__`
-                method. See example below for more information.
+                specific pipeline class. The overwritten components are then directly passed to the pipelines
+                `__init__` method. See example below for more information.
diff --git a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
index 43b734c97c39..4a4f29be7f75 100644
--- a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
+++ b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
@@ -27,7 +27,7 @@ class LDMTextToImagePipeline(DiffusionPipeline):
         vqvae ([`VQModel`]):
             Vector-quantized (VQ) Model to encode and decode images to and from latent representations.
         bert ([`LDMBertModel`]):
-            Text-encoder model based on [BERT](ttps://huggingface.co/docs/transformers/model_doc/bert) architecture.
+            Text-encoder model based on [BERT](https://huggingface.co/docs/transformers/model_doc/bert) architecture.
         tokenizer (`transformers.BertTokenizer`):
             Tokenizer of class
             [BertTokenizer](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertTokenizer).
@@ -397,7 +397,7 @@ def forward(
         attn_output = attn_output.transpose(1, 2)
 
         # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned aross GPUs when using tensor-parallelism.
+        # partitioned across GPUs when using tensor-parallelism.
         attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim)
 
         attn_output = self.out_proj(attn_output)
diff --git a/src/diffusers/utils/logging.py b/src/diffusers/utils/logging.py
index 7771a5a5bf7a..8c1c77d10b2a 100644
--- a/src/diffusers/utils/logging.py
+++ b/src/diffusers/utils/logging.py
@@ -266,7 +266,7 @@ def reset_format() -> None:
 
 def warning_advice(self, *args, **kwargs):
     """
-    This method is identical to `logger.warninging()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this
+    This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this
     warning will not be printed
     """
    no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False)
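
Note: as an illustration of the `from_pretrained` kwargs behavior described in the docstrings corrected above, here is a minimal sketch: a keyword argument whose name matches a pipeline component overwrites the component loaded from the checkpoint, and the overwritten component is passed directly to the pipeline's `__init__`. The checkpoint id and the choice of `DDIMScheduler` are assumptions for illustration only.

    # Sketch: overriding a pipeline component via a `from_pretrained` kwarg.
    # The model id is an assumption; substitute any compatible checkpoint.
    from diffusers import DDIMScheduler, DiffusionPipeline

    scheduler = DDIMScheduler()  # freshly constructed scheduler with default config
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",  # assumed checkpoint
        scheduler=scheduler,  # overwrites the scheduler loaded from the checkpoint
    )
    assert pipe.scheduler is scheduler  # the override is what __init__ received

The `warning_advice` docstring fixed in the logging.py hunk can likewise be demonstrated with a short sketch. This assumes only what the hunk itself shows: the method forwards to `logger.warning()` unless the env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, and the env var is read at call time via `os.getenv`.

    import os

    # Set before the call; `warning_advice` reads the env var at call time.
    os.environ["DIFFUSERS_NO_ADVISORY_WARNINGS"] = "1"

    from diffusers.utils import logging

    logger = logging.get_logger(__name__)
    logger.warning_advice("advisory warning")  # suppressed: prints nothing
    logger.warning("regular warning")          # still printed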