diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py
index 894d63bf2df0..91945ed1ab91 100644
--- a/src/diffusers/schedulers/scheduling_ddim.py
+++ b/src/diffusers/schedulers/scheduling_ddim.py
@@ -62,7 +62,7 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin):
     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
     [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
-    [`~ConfigMixin.from_config`] functios.
+    [`~ConfigMixin.from_config`] functions.
 
     For more details, see the original paper: https://arxiv.org/abs/2010.02502
 
@@ -73,8 +73,8 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin):
         beta_schedule (`str`):
             the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
             `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
-        trained_betas (`np.ndarray`, optional): TODO
-        timestep_values (`np.ndarray`, optional): TODO
+        trained_betas (`np.ndarray`, optional):
+            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end`, etc.
         clip_sample (`bool`, default `True`):
             option to clip predicted sample between -1 and 1 for numerical stability.
         set_alpha_to_one (`bool`, default `True`):
@@ -91,7 +91,6 @@ def __init__(
         beta_end: float = 0.02,
         beta_schedule: str = "linear",
         trained_betas: Optional[np.ndarray] = None,
-        timestep_values: Optional[np.ndarray] = None,
         clip_sample: bool = True,
         set_alpha_to_one: bool = True,
         tensor_format: str = "pt",
@@ -142,7 +141,8 @@ def set_timesteps(self, num_inference_steps: int, offset: int = 0):
         Args:
             num_inference_steps (`int`):
                 the number of diffusion steps used when generating samples with a pre-trained model.
-            offset (`int`): TODO
+            offset (`int`):
+                optional value to shift timestep values up by. A value of 1 is used in Stable Diffusion for inference.
         """
         self.num_inference_steps = num_inference_steps
         self.timesteps = np.arange(
diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py
index 020739406e71..2b02bfa60e90 100644
--- a/src/diffusers/schedulers/scheduling_ddpm.py
+++ b/src/diffusers/schedulers/scheduling_ddpm.py
@@ -61,7 +61,7 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
     [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
-    [`~ConfigMixin.from_config`] functios.
+    [`~ConfigMixin.from_config`] functions.
 
     For more details, see the original paper: https://arxiv.org/abs/2006.11239
 
@@ -72,7 +72,8 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
         beta_schedule (`str`):
             the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
             `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
-        trained_betas (`np.ndarray`, optional): TODO
+        trained_betas (`np.ndarray`, optional):
+            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end`, etc.
         variance_type (`str`):
             options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
             `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
diff --git a/src/diffusers/schedulers/scheduling_karras_ve.py b/src/diffusers/schedulers/scheduling_karras_ve.py
index 3a2370cfc3e0..4b4c90d07c5d 100644
--- a/src/diffusers/schedulers/scheduling_karras_ve.py
+++ b/src/diffusers/schedulers/scheduling_karras_ve.py
@@ -53,7 +53,7 @@ class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
     [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
-    [`~ConfigMixin.from_config`] functios.
+    [`~ConfigMixin.from_config`] functions.
 
     For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of
     Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the
diff --git a/src/diffusers/schedulers/scheduling_lms_discrete.py b/src/diffusers/schedulers/scheduling_lms_discrete.py
index 1381587febf1..750ec9317e7f 100644
--- a/src/diffusers/schedulers/scheduling_lms_discrete.py
+++ b/src/diffusers/schedulers/scheduling_lms_discrete.py
@@ -32,7 +32,7 @@ class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
     [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
-    [`~ConfigMixin.from_config`] functios.
+    [`~ConfigMixin.from_config`] functions.
 
     Args:
         num_train_timesteps (`int`): number of diffusion steps used to train the model.
@@ -41,10 +41,8 @@ class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
         beta_schedule (`str`):
             the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
             `linear` or `scaled_linear`.
-        trained_betas (`np.ndarray`, optional): TODO
-            options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
-            `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
-        timestep_values (`np.ndarry`, optional): TODO
+        trained_betas (`np.ndarray`, optional):
+            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end`, etc.
         tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays.
     """
 
@@ -57,7 +57,6 @@ def __init__(
         beta_end: float = 0.02,
         beta_schedule: str = "linear",
         trained_betas: Optional[np.ndarray] = None,
-        timestep_values: Optional[np.ndarray] = None,
         tensor_format: str = "pt",
     ):
         if trained_betas is not None:
diff --git a/src/diffusers/schedulers/scheduling_pndm.py b/src/diffusers/schedulers/scheduling_pndm.py
index b43d88bbab77..22d492c16172 100644
--- a/src/diffusers/schedulers/scheduling_pndm.py
+++ b/src/diffusers/schedulers/scheduling_pndm.py
@@ -61,7 +61,7 @@ class PNDMScheduler(SchedulerMixin, ConfigMixin):
     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
     [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
-    [`~ConfigMixin.from_config`] functios.
+    [`~ConfigMixin.from_config`] functions.
 
     For more details, see the original paper: https://arxiv.org/abs/2202.09778
 
@@ -72,7 +72,8 @@ class PNDMScheduler(SchedulerMixin, ConfigMixin):
         beta_schedule (`str`):
             the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
             `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
-        trained_betas (`np.ndarray`, optional): TODO
+        trained_betas (`np.ndarray`, optional):
+            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end`, etc.
         tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays
         skip_prk_steps (`bool`):
             allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required
@@ -138,7 +139,8 @@ def set_timesteps(self, num_inference_steps: int, offset: int = 0) -> torch.Floa
         Args:
             num_inference_steps (`int`):
                 the number of diffusion steps used when generating samples with a pre-trained model.
-            offset (`int`): TODO
+            offset (`int`):
+                optional value to shift timestep values up by. A value of 1 is used in Stable Diffusion for inference.
         """
         self.num_inference_steps = num_inference_steps
         self._timesteps = list(
diff --git a/src/diffusers/schedulers/scheduling_sde_ve.py b/src/diffusers/schedulers/scheduling_sde_ve.py
index e187f0796887..a880b5aa30ab 100644
--- a/src/diffusers/schedulers/scheduling_sde_ve.py
+++ b/src/diffusers/schedulers/scheduling_sde_ve.py
@@ -52,7 +52,7 @@ class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
     [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
-    [`~ConfigMixin.from_config`] functios.
+    [`~ConfigMixin.from_config`] functions.
 
     Args:
         snr (`float`):
@@ -61,7 +61,7 @@ class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
             initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the
             distribution of the data.
         sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model.
-        sampling_eps (`float`): the end value of sampling, where timesteps decrease progessively from 1 to
+        sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to
             epsilon.
         correct_steps (`int`): number of correction steps performed on a produced sample.
         tensor_format (`str`): "np" or "pt" for the expected format of samples passed to the Scheduler.
diff --git a/src/diffusers/schedulers/scheduling_sde_vp.py b/src/diffusers/schedulers/scheduling_sde_vp.py
index 8ada5b8c97d9..c30237395ae1 100644
--- a/src/diffusers/schedulers/scheduling_sde_vp.py
+++ b/src/diffusers/schedulers/scheduling_sde_vp.py
@@ -30,7 +30,7 @@ class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
     [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and
-    [`~ConfigMixin.from_config`] functios.
+    [`~ConfigMixin.from_config`] functions.
 
     For more information, see the original paper: https://arxiv.org/abs/2011.13456
 
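As a usage note for the newly documented arguments, here is a minimal sketch of how `trained_betas` and the `offset` argument of `set_timesteps` fit together, based only on the signatures shown in this patch. It assumes `DDIMScheduler` is importable from the top-level `diffusers` package; the beta array is an arbitrary illustration, not a recommended schedule.

```python
import numpy as np
from diffusers import DDIMScheduler

# Pass a beta array directly, bypassing `beta_start`/`beta_end`.
# This linspace is an arbitrary example, not a recommendation.
custom_betas = np.linspace(0.0001, 0.02, 1000, dtype=np.float64)
scheduler = DDIMScheduler(num_train_timesteps=1000, trained_betas=custom_betas)

# `offset=1` shifts every timestep value up by one, as used for
# Stable Diffusion inference.
scheduler.set_timesteps(num_inference_steps=50, offset=1)
print(scheduler.timesteps[:5])  # first few (shifted) timesteps
```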