8 changes: 8 additions & 0 deletions docs/source/en/api/pipelines/kolors.md
@@ -105,3 +105,11 @@ image.save("kolors_ipa_sample.png")
 
 - all
 - __call__
+
+## KolorsImg2ImgPipeline
+
+[[autodoc]] KolorsImg2ImgPipeline
+
+- all
+- __call__
+
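The new entry documents the image-to-image variant of the Kolors pipeline. A minimal usage sketch follows; the checkpoint id, fp16 variant, and file paths are assumptions carried over from the text-to-image example earlier on that doc page, not part of this diff:

```python
import torch
from diffusers import KolorsImg2ImgPipeline
from diffusers.utils import load_image

# Assumed checkpoint and variant, mirroring the text-to-image example on the Kolors doc page.
pipe = KolorsImg2ImgPipeline.from_pretrained(
    "Kwai-Kolors/Kolors-diffusers", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

init_image = load_image("path/to/init_image.png")  # placeholder input image
prompt = "A photo of a ladybug on a leaf, macro photography, high quality"

# strength controls how far the input image is re-noised before denoising;
# lower values stay closer to the input.
image = pipe(prompt=prompt, image=init_image, strength=0.7, num_inference_steps=50).images[0]
image.save("kolors_img2img_sample.png")
```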
@@ -1024,22 +1024,24 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
         if denoising_start is None:
             init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
             t_start = max(num_inference_steps - init_timestep, 0)
-        else:
-            t_start = 0
 
-        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start * self.scheduler.order)
+
+            return timesteps, num_inference_steps - t_start
 
-        # Strength is irrelevant if we directly request a timestep to start at;
-        # that is, strength is determined by the denoising_start instead.
-        if denoising_start is not None:
+        else:
+            # Strength is irrelevant if we directly request a timestep to start at;
+            # that is, strength is determined by the denoising_start instead.
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
                     - (denoising_start * self.scheduler.config.num_train_timesteps)
                 )
             )
 
-            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
+            num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item()
             if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                 # if the scheduler is a 2nd order scheduler we might have to do +1
                 # because `num_inference_steps` might be even given that every timestep
@@ -1050,11 +1052,12 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
                 num_inference_steps = num_inference_steps + 1
 
             # because t_n+1 >= t_n, we slice the timesteps starting from the end
-            timesteps = timesteps[-num_inference_steps:]
+            t_start = len(self.scheduler.timesteps) - num_inference_steps
+            timesteps = self.scheduler.timesteps[t_start:]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start)
             return timesteps, num_inference_steps
 
-        return timesteps, num_inference_steps - t_start
-
     def _get_add_time_ids(
         self,
         original_size,
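The refactor above is easier to follow outside the pipeline class. The sketch below replays the two branches of the new get_timesteps against a toy scheduler; FakeScheduler and its numbers are illustrative stand-ins rather than diffusers objects, and the second-order parity adjustment is omitted for brevity. The strength branch trims steps from the front of the schedule, the denoising_start branch counts the timesteps that survive a training-step cutoff, and both now forward the slice offset through set_begin_index.

```python
import torch


class FakeScheduler:
    """Toy stand-in exposing only what get_timesteps relies on (not a diffusers class)."""

    order = 1
    num_train_timesteps = 1000

    def __init__(self, num_inference_steps):
        # Evenly spaced, descending timesteps over the training range: 999, 888, ..., 0.
        self.timesteps = torch.linspace(self.num_train_timesteps - 1, 0, num_inference_steps).long()
        self.begin_index = None

    def set_begin_index(self, begin_index=0):
        self.begin_index = begin_index


def get_timesteps(scheduler, num_inference_steps, strength, denoising_start=None):
    if denoising_start is None:
        # strength branch: skip the first (1 - strength) fraction of the schedule.
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)

        timesteps = scheduler.timesteps[t_start * scheduler.order :]
        if hasattr(scheduler, "set_begin_index"):
            scheduler.set_begin_index(t_start * scheduler.order)
        return timesteps, num_inference_steps - t_start
    else:
        # denoising_start branch: keep only timesteps below the training-step cutoff
        # (the 2nd-order parity fix from the real code is omitted here).
        cutoff = int(round(scheduler.num_train_timesteps - denoising_start * scheduler.num_train_timesteps))
        num_inference_steps = (scheduler.timesteps < cutoff).sum().item()
        t_start = len(scheduler.timesteps) - num_inference_steps
        timesteps = scheduler.timesteps[t_start:]
        if hasattr(scheduler, "set_begin_index"):
            scheduler.set_begin_index(t_start)
        return timesteps, num_inference_steps


scheduler = FakeScheduler(num_inference_steps=10)
ts, n = get_timesteps(scheduler, 10, strength=0.3)
print(ts.tolist(), n, scheduler.begin_index)  # [222, 111, 0] 3 7

scheduler = FakeScheduler(num_inference_steps=10)
# strength is ignored when denoising_start is given (the else branch).
ts, n = get_timesteps(scheduler, 10, strength=1.0, denoising_start=0.7)
print(ts.tolist(), n, scheduler.begin_index)  # [222, 111, 0] 3 7
```

Both calls select the same three timesteps; the difference is only in how the starting offset is derived, and that offset is now also handed to schedulers that track a begin index.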
23 changes: 13 additions & 10 deletions src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
@@ -564,22 +564,24 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
         if denoising_start is None:
             init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
             t_start = max(num_inference_steps - init_timestep, 0)
-        else:
-            t_start = 0
 
-        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start * self.scheduler.order)
+
+            return timesteps, num_inference_steps - t_start
 
-        # Strength is irrelevant if we directly request a timestep to start at;
-        # that is, strength is determined by the denoising_start instead.
-        if denoising_start is not None:
+        else:
+            # Strength is irrelevant if we directly request a timestep to start at;
+            # that is, strength is determined by the denoising_start instead.
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
                     - (denoising_start * self.scheduler.config.num_train_timesteps)
                 )
             )
 
-            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
+            num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item()
             if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                 # if the scheduler is a 2nd order scheduler we might have to do +1
                 # because `num_inference_steps` might be even given that every timestep
@@ -590,11 +592,12 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
                 num_inference_steps = num_inference_steps + 1
 
             # because t_n+1 >= t_n, we slice the timesteps starting from the end
-            timesteps = timesteps[-num_inference_steps:]
+            t_start = len(self.scheduler.timesteps) - num_inference_steps
+            timesteps = self.scheduler.timesteps[t_start:]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start)
             return timesteps, num_inference_steps
 
-        return timesteps, num_inference_steps - t_start
-
     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
     def prepare_latents(
         self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
23 changes: 13 additions & 10 deletions src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py
@@ -648,22 +648,24 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
         if denoising_start is None:
             init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
             t_start = max(num_inference_steps - init_timestep, 0)
-        else:
-            t_start = 0
 
-        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start * self.scheduler.order)
+
+            return timesteps, num_inference_steps - t_start
 
-        # Strength is irrelevant if we directly request a timestep to start at;
-        # that is, strength is determined by the denoising_start instead.
-        if denoising_start is not None:
+        else:
+            # Strength is irrelevant if we directly request a timestep to start at;
+            # that is, strength is determined by the denoising_start instead.
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
                     - (denoising_start * self.scheduler.config.num_train_timesteps)
                 )
             )
 
-            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
+            num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item()
             if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                 # if the scheduler is a 2nd order scheduler we might have to do +1
                 # because `num_inference_steps` might be even given that every timestep
@@ -674,11 +676,12 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
                 num_inference_steps = num_inference_steps + 1
 
             # because t_n+1 >= t_n, we slice the timesteps starting from the end
-            timesteps = timesteps[-num_inference_steps:]
+            t_start = len(self.scheduler.timesteps) - num_inference_steps
+            timesteps = self.scheduler.timesteps[t_start:]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start)
             return timesteps, num_inference_steps
 
-        return timesteps, num_inference_steps - t_start
-
     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
     def prepare_latents(
         self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
23 changes: 13 additions & 10 deletions src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py
@@ -897,22 +897,24 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
         if denoising_start is None:
             init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
             t_start = max(num_inference_steps - init_timestep, 0)
-        else:
-            t_start = 0
 
-        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start * self.scheduler.order)
+
+            return timesteps, num_inference_steps - t_start
 
-        # Strength is irrelevant if we directly request a timestep to start at;
-        # that is, strength is determined by the denoising_start instead.
-        if denoising_start is not None:
+        else:
+            # Strength is irrelevant if we directly request a timestep to start at;
+            # that is, strength is determined by the denoising_start instead.
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
                     - (denoising_start * self.scheduler.config.num_train_timesteps)
                 )
             )
 
-            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
+            num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item()
             if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                 # if the scheduler is a 2nd order scheduler we might have to do +1
                 # because `num_inference_steps` might be even given that every timestep
@@ -923,11 +925,12 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
                 num_inference_steps = num_inference_steps + 1
 
             # because t_n+1 >= t_n, we slice the timesteps starting from the end
-            timesteps = timesteps[-num_inference_steps:]
+            t_start = len(self.scheduler.timesteps) - num_inference_steps
+            timesteps = self.scheduler.timesteps[t_start:]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start)
             return timesteps, num_inference_steps
 
-        return timesteps, num_inference_steps - t_start
-
     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
     def _get_add_time_ids(
         self,
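Each copy of get_timesteps guards the new call with hasattr because not every scheduler implements set_begin_index. A quick probe, with three scheduler classes picked only as convenient examples (the result depends on the installed diffusers version):

```python
from diffusers import DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler

# Report which of a few common schedulers expose set_begin_index.
for scheduler_cls in (DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler):
    scheduler = scheduler_cls()  # all three instantiate with their default config
    print(scheduler_cls.__name__, hasattr(scheduler, "set_begin_index"))
```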
@@ -640,22 +640,24 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
         if denoising_start is None:
             init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
             t_start = max(num_inference_steps - init_timestep, 0)
-        else:
-            t_start = 0
 
-        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start * self.scheduler.order)
+
+            return timesteps, num_inference_steps - t_start
 
-        # Strength is irrelevant if we directly request a timestep to start at;
-        # that is, strength is determined by the denoising_start instead.
-        if denoising_start is not None:
+        else:
+            # Strength is irrelevant if we directly request a timestep to start at;
+            # that is, strength is determined by the denoising_start instead.
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
                     - (denoising_start * self.scheduler.config.num_train_timesteps)
                 )
             )
 
-            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
+            num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item()
             if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                 # if the scheduler is a 2nd order scheduler we might have to do +1
                 # because `num_inference_steps` might be even given that every timestep
@@ -666,11 +668,12 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
                 num_inference_steps = num_inference_steps + 1
 
             # because t_n+1 >= t_n, we slice the timesteps starting from the end
-            timesteps = timesteps[-num_inference_steps:]
+            t_start = len(self.scheduler.timesteps) - num_inference_steps
+            timesteps = self.scheduler.timesteps[t_start:]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start)
             return timesteps, num_inference_steps
 
-        return timesteps, num_inference_steps - t_start
-
     def prepare_latents(
         self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
     ):
@@ -901,22 +901,24 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
         if denoising_start is None:
             init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
             t_start = max(num_inference_steps - init_timestep, 0)
-        else:
-            t_start = 0
 
-        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start * self.scheduler.order)
+
+            return timesteps, num_inference_steps - t_start
 
-        # Strength is irrelevant if we directly request a timestep to start at;
-        # that is, strength is determined by the denoising_start instead.
-        if denoising_start is not None:
+        else:
+            # Strength is irrelevant if we directly request a timestep to start at;
+            # that is, strength is determined by the denoising_start instead.
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
                     - (denoising_start * self.scheduler.config.num_train_timesteps)
                 )
             )
 
-            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
+            num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item()
             if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                 # if the scheduler is a 2nd order scheduler we might have to do +1
                 # because `num_inference_steps` might be even given that every timestep
@@ -927,11 +929,12 @@ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
                 num_inference_steps = num_inference_steps + 1
 
             # because t_n+1 >= t_n, we slice the timesteps starting from the end
-            timesteps = timesteps[-num_inference_steps:]
+            t_start = len(self.scheduler.timesteps) - num_inference_steps
+            timesteps = self.scheduler.timesteps[t_start:]
+            if hasattr(self.scheduler, "set_begin_index"):
+                self.scheduler.set_begin_index(t_start)
             return timesteps, num_inference_steps
 
-        return timesteps, num_inference_steps - t_start
-
     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
     def _get_add_time_ids(
         self,
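For context on why the denoising_start branch exists: it supports split workflows where a first pipeline stops at denoising_end and a second, image-to-image pipeline resumes from the same fraction of the schedule. A sketch of that pattern with the stock SDXL pipelines; the checkpoint ids and the 0.8 split are the usual documentation values, assumed here rather than taken from this diff:

```python
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline

base = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

prompt = "a majestic lion jumping from a big stone at night"

# The base pipeline denoises the first 80% of the schedule and returns latents.
latents = base(
    prompt=prompt, num_inference_steps=40, denoising_end=0.8, output_type="latent"
).images

# The img2img pipeline resumes at the same fraction; its get_timesteps call takes the
# denoising_start branch shown repeatedly above and records the begin index on the scheduler.
image = refiner(
    prompt=prompt, image=latents, num_inference_steps=40, denoising_start=0.8
).images[0]
image.save("sdxl_base_refiner.png")
```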