Skip to content

Commit 2595c1d

Browse files
LoRA model loading fixes (#3663)
This PR enables the model manager to import diffusers-style .bin LoRAs. However, since there is no backend support for this type of LoRA yet, attempts to use them will result in an unimplemented error. It closes #3636 and #3637.
2 parents 94e38e9 + c2eb6c3 commit 2595c1d

File tree

4 files changed

+14
-10
lines changed

4 files changed

+14
-10
lines changed

invokeai/backend/install/model_install_backend.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,10 @@ def heuristic_import(self,
193193
models_installed.update(self._install_path(path))
194194

195195
# folders style or similar
196-
elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
196+
elif path.is_dir() and any([(path/x).exists() for x in \
197+
{'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}
198+
]
199+
):
197200
models_installed.update(self._install_path(path))
198201

199202
# recursive scan

invokeai/backend/model_management/model_manager.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -785,7 +785,7 @@ def autoimport(self)->Dict[str, AddModelResult]:
785785
if path in known_paths or path.parent in scanned_dirs:
786786
scanned_dirs.add(path)
787787
continue
788-
if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
788+
if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}]):
789789
new_models_found.update(installer.heuristic_import(path))
790790
scanned_dirs.add(path)
791791

@@ -794,7 +794,8 @@ def autoimport(self)->Dict[str, AddModelResult]:
794794
if path in known_paths or path.parent in scanned_dirs:
795795
continue
796796
if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}:
797-
new_models_found.update(installer.heuristic_import(path))
797+
import_result = installer.heuristic_import(path)
798+
new_models_found.update(import_result)
798799

799800
self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models')
800801
installed.update(new_models_found)

invokeai/backend/model_management/model_probe.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,6 @@ def probe(cls,
7878
format_type = 'diffusers' if model_path.is_dir() else 'checkpoint'
7979
else:
8080
format_type = 'diffusers' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint'
81-
8281
model_info = None
8382
try:
8483
model_type = cls.get_model_type_from_folder(model_path, model) \
@@ -105,7 +104,7 @@ def probe(cls,
105104
) else 512,
106105
)
107106
except Exception:
108-
return None
107+
raise
109108

110109
return model_info
111110

@@ -127,6 +126,8 @@ def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: dict) -> M
127126
return ModelType.Vae
128127
elif any(key.startswith(v) for v in {"lora_te_", "lora_unet_"}):
129128
return ModelType.Lora
129+
elif any(key.endswith(v) for v in {"to_k_lora.up.weight", "to_q_lora.down.weight"}):
130+
return ModelType.Lora
130131
elif any(key.startswith(v) for v in {"control_model", "input_blocks"}):
131132
return ModelType.ControlNet
132133
elif key in {"emb_params", "string_to_param"}:
@@ -137,7 +138,7 @@ def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: dict) -> M
137138
if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
138139
return ModelType.TextualInversion
139140

140-
raise ValueError("Unable to determine model type")
141+
raise ValueError(f"Unable to determine model type for {model_path}")
141142

142143
@classmethod
143144
def get_model_type_from_folder(cls, folder_path: Path, model: ModelMixin)->ModelType:
@@ -167,7 +168,7 @@ def get_model_type_from_folder(cls, folder_path: Path, model: ModelMixin)->Model
167168
return type
168169

169170
# give up
170-
raise ValueError("Unable to determine model type")
171+
raise ValueError(f"Unable to determine model type for {folder_path}")
171172

172173
@classmethod
173174
def _scan_and_load_checkpoint(cls,model_path: Path)->dict:

invokeai/frontend/install/model_install.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -678,9 +678,8 @@ def select_and_download_models(opt: Namespace):
678678

679679
# this is where the TUI is called
680680
else:
681-
# needed because the torch library is loaded, even though we don't use it
682-
# currently commented out because it has started generating errors (?)
683-
# torch.multiprocessing.set_start_method("spawn")
681+
# needed to support the probe() method running under a subprocess
682+
torch.multiprocessing.set_start_method("spawn")
684683

685684
# the third argument is needed in the Windows 11 environment in
686685
# order to launch and resize a console window running this program

0 commit comments

Comments (0)