
Commit 994a76a

[Enhancement] distinguish v1 from v2 LoRA models (#3175)
# Distinguish LoRA/LyCORIS files based on which version of SD they were built on top of

- Attempting to run a prompt with a LoRA based on SD v1.X against a model based on SD v2.X will now throw an `IncompatibleModelException`. To import this exception: `from ldm.modules.lora_manager import IncompatibleModelException` (maybe this should be defined in ModelManager?)
- Enhanced `LoraManager.list_loras()` to accept an optional integer argument, `token_vector_length`, which filters the returned LoRA models to those that match the indicated length:
  ```
  768  => for models based on SD v1.X
  1024 => for models based on SD v2.X
  ```
  Note that this filtering requires each LoRA file to be opened with `safetensors.torch`; scanning a directory of 40 files takes ~8 s.
- Added new static methods to `ldm.modules.kohya_lora_manager` (see the usage sketch after this list):
  - `check_model_compatibility()`
  - `vector_length_from_checkpoint()`
  - `vector_length_from_checkpoint_file()`
- You can now create subdirectories within the `loras` directory to organize the model files.
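For illustration, a minimal usage sketch of the new API. The import locations, the `model` handle, and the file path are assumptions based on the description above, not a verbatim reference; the `kohya_lora_manager` helpers are shown as plain imports though they may live on a class inside that module:

```python
# Hypothetical usage sketch based on the commit description; the import
# locations and the `model` handle are assumptions, not a verbatim API.
from ldm.modules.lora_manager import LoraManager
from ldm.modules.kohya_lora_manager import vector_length_from_checkpoint_file

def list_v1_loras(model) -> dict:
    """Return LoRAs whose token vector length matches an SD v1.X model."""
    lora_mgr = LoraManager(model)
    return lora_mgr.list_loras(token_vector_length=768)

# Inspect a single file directly; subdirectories under `loras/` are now
# scanned too, so a nested path like this (hypothetical) is fine.
length = vector_length_from_checkpoint_file("loras/anime/my_style.safetensors")
print("built on SD v1.X" if length == 768 else "built on SD v2.X")
```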
2 parents 5dbc63e + 144dfe4

File tree

11 files changed: +278 -120 lines changed

invokeai/backend/invoke_ai_web_server.py

Lines changed: 6 additions & 14 deletions
```diff
@@ -37,11 +37,11 @@
     Globals,
     global_converted_ckpts_dir,
     global_models_dir,
-    global_lora_models_dir,
 )
 from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
 from compel.prompt_parser import Blend
 from ldm.invoke.merge_diffusers import merge_diffusion_models
+from ldm.modules.lora_manager import LoraManager
 
 # Loading Arguments
 opt = Args()
@@ -523,20 +523,12 @@ def merge_diffusers_models(model_merge_info: dict):
         @socketio.on("getLoraModels")
         def get_lora_models():
             try:
-                lora_path = global_lora_models_dir()
-                loras = []
-                for root, _, files in os.walk(lora_path):
-                    models = [
-                        Path(root, x)
-                        for x in files
-                        if Path(x).suffix in [".ckpt", ".pt", ".safetensors"]
-                    ]
-                    loras = loras + models
-
+                model = self.generate.model
+                lora_mgr = LoraManager(model)
+                loras = lora_mgr.list_compatible_loras()
                 found_loras = []
-                for lora in sorted(loras, key=lambda s: s.stem.lower()):
-                    location = str(lora.resolve()).replace("\\", "/")
-                    found_loras.append({"name": lora.stem, "location": location})
+                for lora in sorted(loras, key=str.casefold):
+                    found_loras.append({"name": lora, "location": str(loras[lora])})
                 socketio.emit("foundLoras", found_loras)
             except Exception as e:
                 self.handle_exceptions(e)
```
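From the call site above, `list_compatible_loras()` evidently returns a mapping of LoRA name to file path, pre-filtered for the loaded model. A minimal sketch of what the method might reduce to, assuming the token vector length is read from the pipeline's text encoder (an assumption; the real implementation lives in `ldm/modules/lora_manager.py`):

```python
from pathlib import Path

def list_compatible_loras(self) -> dict[str, Path]:
    # Assumption: the text encoder's hidden size gives the token vector
    # length -- 768 for SD v1.X pipelines, 1024 for SD v2.X pipelines.
    vector_length = self.model.text_encoder.config.hidden_size
    return self.list_loras(token_vector_length=vector_length)
```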

invokeai/configs/INITIAL_MODELS.yaml

Lines changed: 2 additions & 1 deletion
```diff
@@ -80,7 +80,8 @@ trinart-2.0:
   repo_id: stabilityai/sd-vae-ft-mse
   recommended: False
 waifu-diffusion-1.4:
-  description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
+  description: An SD-2.1 model trained on 5.4M anime/manga-style images (4.27 GB)
+  revision: main
   repo_id: hakurei/waifu-diffusion
   format: diffusers
   vae:
```

invokeai/frontend/dist/assets/index-f56b39bc.js renamed to invokeai/frontend/dist/assets/index-b12e648e.js

Lines changed: 36 additions & 36 deletions

invokeai/frontend/dist/index.html

Lines changed: 1 addition & 1 deletion
```diff
@@ -5,7 +5,7 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index-f56b39bc.js"></script>
+    <script type="module" crossorigin src="./assets/index-b12e648e.js"></script>
     <link rel="stylesheet" href="./assets/index-2ab0eb58.css">
   </head>
 
```

invokeai/frontend/src/app/socketio/listeners.ts

Lines changed: 6 additions & 0 deletions
```diff
@@ -33,6 +33,10 @@ import {
   setIntermediateImage,
 } from 'features/gallery/store/gallerySlice';
 
+import {
+  getLoraModels,
+  getTextualInversionTriggers,
+} from 'app/socketio/actions';
 import type { RootState } from 'app/store';
 import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
 import {
@@ -463,6 +467,8 @@ const makeSocketIOListeners = (
       const { model_name, model_list } = data;
       dispatch(setModelList(model_list));
      dispatch(setCurrentStatus(i18n.t('common.statusModelChanged')));
+      dispatch(getLoraModels());
+      dispatch(getTextualInversionTriggers());
       dispatch(setIsProcessing(false));
       dispatch(setIsCancelable(true));
       dispatch(
```

invokeai/frontend/stats.html

Lines changed: 1 addition & 1 deletion

ldm/generate.py

Lines changed: 2 additions & 3 deletions
```diff
@@ -633,9 +633,8 @@ def process_image(image,seed):
         except RuntimeError:
             # Clear the CUDA cache on an exception
             self.clear_cuda_cache()
-
-            print(traceback.format_exc(), file=sys.stderr)
-            print(">> Could not generate image.")
+            print("** Could not generate image.")
+            raise
 
         toc = time.time()
         print("\n>> Usage stats:")
```

ldm/invoke/config/model_install_backend.py

Lines changed: 15 additions & 4 deletions
```diff
@@ -11,6 +11,7 @@
 
 import requests
 from diffusers import AutoencoderKL
+from diffusers import logging as dlogging
 from huggingface_hub import hf_hub_url
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
@@ -295,13 +296,21 @@ def _download_diffusion_weights(
     mconfig: DictConfig, access_token: str, precision: str = "float32"
 ):
     repo_id = mconfig["repo_id"]
+    revision = mconfig.get('revision', None)
     model_class = (
         StableDiffusionGeneratorPipeline
         if mconfig.get("format", None) == "diffusers"
         else AutoencoderKL
     )
-    extra_arg_list = [{"revision": "fp16"}, {}] if precision == "float16" else [{}]
+    extra_arg_list = [{"revision": revision}] if revision \
+        else [{"revision": "fp16"}, {}] if precision == "float16" \
+        else [{}]
     path = None
+
+    # quench safety checker warnings
+    verbosity = dlogging.get_verbosity()
+    dlogging.set_verbosity_error()
+
     for extra_args in extra_arg_list:
         try:
             path = download_from_hf(
@@ -317,6 +326,7 @@ def _download_diffusion_weights(
             print(f"An unexpected error occurred while downloading the model: {e})")
         if path:
             break
+    dlogging.set_verbosity(verbosity)
     return path
 
 
@@ -447,6 +457,8 @@ def new_config_file_contents(
         stanza["description"] = mod["description"]
         stanza["repo_id"] = mod["repo_id"]
         stanza["format"] = mod["format"]
+        if "revision" in mod:
+            stanza["revision"] = mod["revision"]
         # diffusers don't need width and height (probably .ckpt doesn't either)
         # so we no longer require these in INITIAL_MODELS.yaml
         if "width" in mod:
@@ -471,10 +483,9 @@ def new_config_file_contents(
 
         conf[model] = stanza
 
-    # if no default model was chosen, then we select the first
-    # one in the list
+    # if no default model was chosen, then we select the first one in the list
     if not default_selected:
-        conf[list(successfully_downloaded.keys())[0]]["default"] = True
+        conf[list(conf.keys())[0]]["default"] = True
 
     return OmegaConf.to_yaml(conf)
```
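The net effect of the `extra_arg_list` change is that an explicit `revision:` in the model stanza takes precedence over the fp16 heuristic. A standalone illustration of that ordering (`download_attempt_args` is a hypothetical name, and `mconfig` here is a plain dict standing in for the OmegaConf stanza):

```python
def download_attempt_args(mconfig: dict, precision: str) -> list[dict]:
    """kwargs tried, in order, by _download_diffusion_weights()."""
    revision = mconfig.get("revision")
    if revision:                       # 1. explicit stanza revision only
        return [{"revision": revision}]
    if precision == "float16":         # 2. fp16 branch, then default branch
        return [{"revision": "fp16"}, {}]
    return [{}]                        # 3. default branch only

assert download_attempt_args({"revision": "main"}, "float16") == [{"revision": "main"}]
assert download_attempt_args({}, "float16") == [{"revision": "fp16"}, {}]
assert download_attempt_args({}, "float32") == [{}]
```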
ldm/invoke/model_manager.py

Lines changed: 3 additions & 9 deletions
```diff
@@ -372,12 +372,6 @@ def _load_ckpt_model(self, model_name, mconfig):
         )
         from ldm.invoke.ckpt_to_diffuser import load_pipeline_from_original_stable_diffusion_ckpt
 
-        # try:
-        #     if self.list_models()[self.current_model]['status'] == 'active':
-        #         self.offload_model(self.current_model)
-        # except Exception:
-        #     pass
-
         if self._has_cuda():
             torch.cuda.empty_cache()
         pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
@@ -423,9 +417,9 @@ def _load_diffusers_model(self, mconfig):
         pipeline_args.update(cache_dir=global_cache_dir("hub"))
         if using_fp16:
             pipeline_args.update(torch_dtype=torch.float16)
-            fp_args_list = [{"revision": "fp16"}, {}]
-        else:
-            fp_args_list = [{}]
+        revision = mconfig.get('revision') or ('fp16' if using_fp16 else None)
+        fp_args_list = [{"revision": revision}] if revision else []
+        fp_args_list.append({})
 
         verbosity = dlogging.get_verbosity()
         dlogging.set_verbosity_error()
```
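Note the subtle difference from the installer path above: `_load_diffusers_model()` always appends a bare `{}`, so loading still falls back to the default branch when a pinned revision is missing from the Hub. A compact illustration under the same dict-for-DictConfig assumption (`load_attempt_args` is a hypothetical name):

```python
def load_attempt_args(mconfig: dict, using_fp16: bool) -> list[dict]:
    # Stanza revision wins; otherwise 'fp16' when half precision is in use;
    # a revision-less attempt is always kept as the final fallback.
    revision = mconfig.get("revision") or ("fp16" if using_fp16 else None)
    fp_args_list = [{"revision": revision}] if revision else []
    fp_args_list.append({})
    return fp_args_list

assert load_attempt_args({"revision": "main"}, True) == [{"revision": "main"}, {}]
assert load_attempt_args({}, True) == [{"revision": "fp16"}, {}]
assert load_attempt_args({}, False) == [{}]
```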