
Commit 4fbd310

[Chore] switch to logger.warning (#7289)
switch to logger.warning
1 parent 2ea28d6 commit 4fbd310


61 files changed (+138, -136 lines)

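Why the rename matters: in Python's standard logging module, Logger.warn is a long-deprecated alias for Logger.warning, and the loggers these example scripts obtain are effectively standard logging.Logger instances, so every logger.warn(...) call goes through the stdlib's deprecation path. A minimal sketch of the before/after, assuming a plain stdlib logger (the setup below is illustrative, not code from this commit):

import logging
import warnings

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Show the stdlib's DeprecationWarning instead of letting it be filtered out.
warnings.simplefilter("always", DeprecationWarning)

# Before: the deprecated alias logs the same record but additionally emits a
# DeprecationWarning through the warnings machinery.
logger.warn("xFormers 0.0.16 cannot be used for training in some GPUs.")

# After: the preferred spelling; identical log output, no warning.
logger.warning("xFormers 0.0.16 cannot be used for training in some GPUs.")

Both calls emit the same WARNING record; the only behavioral difference is the DeprecationWarning the alias triggers, which is what this commit removes across the 61 files below.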

examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py

Lines changed: 5 additions & 5 deletions
@@ -1215,7 +1215,7 @@ def main(args):
 
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
                     "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
@@ -1366,14 +1366,14 @@ def load_model_hook(models, input_dir):
 
     # Optimizer creation
     if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
-        logger.warn(
+        logger.warning(
             f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
             "Defaulting to adamW"
         )
         args.optimizer = "adamw"
 
     if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
-        logger.warn(
+        logger.warning(
             f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
             f"set to {args.optimizer.lower()}"
         )
@@ -1407,11 +1407,11 @@ def load_model_hook(models, input_dir):
         optimizer_class = prodigyopt.Prodigy
 
         if args.learning_rate <= 0.1:
-            logger.warn(
+            logger.warning(
                 "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
             )
         if args.train_text_encoder and args.text_encoder_lr:
-            logger.warn(
+            logger.warning(
                 f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
                 f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
                 f"When using prodigy only learning_rate is used as the initial learning rate."

examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py

Lines changed: 5 additions & 5 deletions
@@ -1317,7 +1317,7 @@ def main(args):
 
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
                     "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
@@ -1522,14 +1522,14 @@ def load_model_hook(models, input_dir):
 
     # Optimizer creation
     if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
-        logger.warn(
+        logger.warning(
             f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
             "Defaulting to adamW"
         )
         args.optimizer = "adamw"
 
     if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
-        logger.warn(
+        logger.warning(
             f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
             f"set to {args.optimizer.lower()}"
         )
@@ -1563,11 +1563,11 @@ def load_model_hook(models, input_dir):
         optimizer_class = prodigyopt.Prodigy
 
         if args.learning_rate <= 0.1:
-            logger.warn(
+            logger.warning(
                 "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
             )
         if args.train_text_encoder and args.text_encoder_lr:
-            logger.warn(
+            logger.warning(
                 f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
                 f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
                 f"When using prodigy only learning_rate is used as the initial learning rate."

examples/community/pipeline_stable_diffusion_xl_instantid.py

Lines changed: 1 addition & 1 deletion
@@ -452,7 +452,7 @@ def cuda(self, dtype=torch.float16, use_xformers=False):
 
                 xformers_version = version.parse(xformers.__version__)
                 if xformers_version == version.parse("0.0.16"):
-                    logger.warn(
+                    logger.warning(
                         "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                     )
                 self.enable_xformers_memory_efficient_attention()

examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py

Lines changed: 2 additions & 2 deletions
@@ -308,7 +308,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step):
 
             tracker.log({"validation": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
 
     del pipeline
     gc.collect()
@@ -1068,7 +1068,7 @@ def load_model_hook(models, input_dir):
 
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
             unet.enable_xformers_memory_efficient_attention()

examples/consistency_distillation/train_lcm_distill_lora_sdxl.py

Lines changed: 2 additions & 2 deletions
@@ -180,7 +180,7 @@ def log_validation(vae, args, accelerator, weight_dtype, step, unet=None, is_fin
             logger_name = "test" if is_final_validation else "validation"
             tracker.log({logger_name: formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
 
     del pipeline
     gc.collect()
@@ -928,7 +928,7 @@ def load_model_hook(models, input_dir):
 
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
             unet.enable_xformers_memory_efficient_attention()

examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py

Lines changed: 2 additions & 2 deletions
@@ -325,7 +325,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step):
 
             tracker.log({"validation": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
 
     del pipeline
     gc.collect()
@@ -1083,7 +1083,7 @@ def load_model_hook(models, input_dir):
 
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
             unet.enable_xformers_memory_efficient_attention()

examples/consistency_distillation/train_lcm_distill_sd_wds.py

Lines changed: 2 additions & 2 deletions
@@ -285,7 +285,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step, name="targe
 
             tracker.log({f"validation/{name}": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
 
     del pipeline
     gc.collect()
@@ -1023,7 +1023,7 @@ def load_model_hook(models, input_dir):
 
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
             unet.enable_xformers_memory_efficient_attention()

examples/consistency_distillation/train_lcm_distill_sdxl_wds.py

Lines changed: 2 additions & 2 deletions
@@ -303,7 +303,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step, name="targe
 
             tracker.log({f"validation/{name}": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
 
     del pipeline
     gc.collect()
@@ -1083,7 +1083,7 @@ def load_model_hook(models, input_dir):
 
             xformers_version = version.parse(xformers.__version__)
            if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
             unet.enable_xformers_memory_efficient_attention()

examples/controlnet/train_controlnet.py

Lines changed: 2 additions & 2 deletions
@@ -178,7 +178,7 @@ def log_validation(
 
             tracker.log({tracker_key: formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
 
     del pipeline
     gc.collect()
@@ -861,7 +861,7 @@ def load_model_hook(models, input_dir):
 
             xformers_version = version.parse(xformers.__version__)
             if xformers_version == version.parse("0.0.16"):
-                logger.warn(
+                logger.warning(
                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                 )
             unet.enable_xformers_memory_efficient_attention()

examples/controlnet/train_controlnet_flax.py

Lines changed: 1 addition & 1 deletion
@@ -128,7 +128,7 @@ def log_validation(pipeline, pipeline_params, controlnet_params, tokenizer, args
 
         wandb.log({"validation": formatted_images})
     else:
-        logger.warn(f"image logging not implemented for {args.report_to}")
+        logger.warning(f"image logging not implemented for {args.report_to}")
 
     return image_logs

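To keep the deprecated alias from creeping back in, the stdlib deprecation can be escalated to a hard error at a test or CI entry point. A hedged sketch, assuming CPython's current deprecation message (the filter below is a suggestion, not part of this commit):

import warnings

# Turn the stdlib's deprecation notice for Logger.warn into an error so any
# reintroduced `logger.warn(...)` call fails loudly in tests. The message
# regex matches CPython's wording and is an assumption, not repo code.
warnings.filterwarnings(
    "error",
    message="The 'warn' method is deprecated",
    category=DeprecationWarning,
)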