Skip to content

Commit c45b1c7

Browse files
committed
actual style.
1 parent 07a18fd commit c45b1c7

File tree

13 files changed

+46
-41
lines changed

13 files changed

+46
-41
lines changed

examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -744,9 +744,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
                     .to(dtype=self.dtype)
                     * std_token_embedding
                 )
-                self.embeddings_settings[f"original_embeddings_{idx}"] = (
-                    text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
-                )
+                self.embeddings_settings[
+                    f"original_embeddings_{idx}"
+                ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
                 self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding
 
                 inu = torch.ones((len(tokenizer),), dtype=torch.bool)

examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -776,9 +776,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
                     .to(dtype=self.dtype)
                     * std_token_embedding
                 )
-                self.embeddings_settings[f"original_embeddings_{idx}"] = (
-                    text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
-                )
+                self.embeddings_settings[
+                    f"original_embeddings_{idx}"
+                ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
                 self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding
 
                 inu = torch.ones((len(tokenizer),), dtype=torch.bool)

examples/research_projects/multi_token_textual_inversion/textual_inversion.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -830,9 +830,9 @@ def main():
                 # Let's make sure we don't update any embedding weights besides the newly added token
                 index_no_updates = get_mask(tokenizer, accelerator)
                 with torch.no_grad():
-                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
-                        orig_embeds_params[index_no_updates]
-                    )
+                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
+                        index_no_updates
+                    ] = orig_embeds_params[index_no_updates]
 
             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:

examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -886,9 +886,9 @@ def main():
                 index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
 
                 with torch.no_grad():
-                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
-                        orig_embeds_params[index_no_updates]
-                    )
+                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
+                        index_no_updates
+                    ] = orig_embeds_params[index_no_updates]
 
             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:

examples/textual_inversion/textual_inversion.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -910,9 +910,9 @@ def main():
                 index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
 
                 with torch.no_grad():
-                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
-                        orig_embeds_params[index_no_updates]
-                    )
+                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
+                        index_no_updates
+                    ] = orig_embeds_params[index_no_updates]
 
             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:

examples/textual_inversion/textual_inversion_sdxl.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -940,9 +940,9 @@ def main():
                 index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
 
                 with torch.no_grad():
-                    accelerator.unwrap_model(text_encoder_1).get_input_embeddings().weight[index_no_updates] = (
-                        orig_embeds_params[index_no_updates]
-                    )
+                    accelerator.unwrap_model(text_encoder_1).get_input_embeddings().weight[
+                        index_no_updates
+                    ] = orig_embeds_params[index_no_updates]
 
             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:

scripts/convert_svd_to_diffusers.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -381,9 +381,9 @@ def convert_ldm_unet_checkpoint(
 
         # TODO resnet time_mixer.mix_factor
         if f"input_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict:
-            new_checkpoint[f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"] = (
-                unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"]
-            )
+            new_checkpoint[
+                f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"
+            ] = unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"]
 
         if len(attentions):
             paths = renew_attention_paths(attentions)
@@ -478,9 +478,9 @@ def convert_ldm_unet_checkpoint(
             )
 
         if f"output_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict:
-            new_checkpoint[f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"] = (
-                unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"]
-            )
+            new_checkpoint[
+                f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"
+            ] = unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"]
 
         output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
         if ["conv.bias", "conv.weight"] in output_block_list.values():

src/diffusers/loaders/lora.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1288,9 +1288,9 @@ def set_lora_device(self, adapter_names: List[str], device: Union[torch.device,
                     text_encoder_module.lora_A[adapter_name].to(device)
                     text_encoder_module.lora_B[adapter_name].to(device)
                     # this is a param, not a module, so device placement is not in-place -> re-assign
-                    text_encoder_module.lora_magnitude_vector[adapter_name] = (
-                        text_encoder_module.lora_magnitude_vector[adapter_name].to(device)
-                    )
+                    text_encoder_module.lora_magnitude_vector[
+                        adapter_name
+                    ] = text_encoder_module.lora_magnitude_vector[adapter_name].to(device)
 
 
 class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):

src/diffusers/loaders/lora_conversion_utils.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -209,9 +209,9 @@ def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_
 
             if is_unet_dora_lora:
                 dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down."
-                unet_state_dict[diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")] = (
-                    state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
-                )
+                unet_state_dict[
+                    diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")
+                ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
 
         elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")):
             if lora_name.startswith(("lora_te_", "lora_te1_")):
@@ -249,13 +249,13 @@ def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_
                     "_lora.down." if "_lora.down." in diffusers_name else ".lora_linear_layer."
                 )
                 if lora_name.startswith(("lora_te_", "lora_te1_")):
-                    te_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = (
-                        state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
-                    )
+                    te_state_dict[
+                        diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")
+                    ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
                 elif lora_name.startswith("lora_te2_"):
-                    te2_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = (
-                        state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
-                    )
+                    te2_state_dict[
+                        diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")
+                    ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
 
             # Rename the alphas so that they can be mapped appropriately.
             if lora_name_alpha in state_dict:

tests/models/autoencoders/test_models_vae.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -384,10 +384,12 @@ def prepare_init_args_and_inputs_for_common(self):
         return self.init_dict, self.inputs_dict()
 
     @unittest.skip
-    def test_training(self): ...
+    def test_training(self):
+        ...
 
     @unittest.skip
-    def test_ema_training(self): ...
+    def test_ema_training(self):
+        ...
 
 
 class AutoencoderKLTemporalDecoderFastTests(ModelTesterMixin, unittest.TestCase):

0 commit comments

Comments
 (0)