Skip to content

Commit ce6629b

Browse files
authored
Merge branch 'v2.3' into enhance/increase-sha256-chunksize
2 parents a45b338 + 994a76a commit ce6629b

File tree

17 files changed

+434
-335
lines changed

17 files changed

+434
-335
lines changed

installer/lib/installer.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -132,13 +132,12 @@ def app_venv(self, path: str = None):
132132

133133
# Prefer to copy python executables
134134
# so that updates to system python don't break InvokeAI
135-
if not venv_dir.exists():
136-
try:
137-
venv.create(venv_dir, with_pip=True)
138-
# If installing over an existing environment previously created with symlinks,
139-
# the executables will fail to copy. Keep symlinks in that case
140-
except shutil.SameFileError:
141-
venv.create(venv_dir, with_pip=True, symlinks=True)
135+
try:
136+
venv.create(venv_dir, with_pip=True)
137+
# If installing over an existing environment previously created with symlinks,
138+
# the executables will fail to copy. Keep symlinks in that case
139+
except shutil.SameFileError:
140+
venv.create(venv_dir, with_pip=True, symlinks=True)
142141

143142
# upgrade pip in Python 3.9 environments
144143
if int(platform.python_version_tuple()[1]) == 9:

invokeai/backend/invoke_ai_web_server.py

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -37,11 +37,11 @@
3737
Globals,
3838
global_converted_ckpts_dir,
3939
global_models_dir,
40-
global_lora_models_dir,
4140
)
4241
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
4342
from compel.prompt_parser import Blend
4443
from ldm.invoke.merge_diffusers import merge_diffusion_models
44+
from ldm.modules.lora_manager import LoraManager
4545

4646
# Loading Arguments
4747
opt = Args()
@@ -523,20 +523,12 @@ def merge_diffusers_models(model_merge_info: dict):
523523
@socketio.on("getLoraModels")
524524
def get_lora_models():
525525
try:
526-
lora_path = global_lora_models_dir()
527-
loras = []
528-
for root, _, files in os.walk(lora_path):
529-
models = [
530-
Path(root, x)
531-
for x in files
532-
if Path(x).suffix in [".ckpt", ".pt", ".safetensors"]
533-
]
534-
loras = loras + models
535-
526+
model = self.generate.model
527+
lora_mgr = LoraManager(model)
528+
loras = lora_mgr.list_compatible_loras()
536529
found_loras = []
537-
for lora in sorted(loras, key=lambda s: s.stem.lower()):
538-
location = str(lora.resolve()).replace("\\", "/")
539-
found_loras.append({"name": lora.stem, "location": location})
530+
for lora in sorted(loras, key=str.casefold):
531+
found_loras.append({"name":lora,"location":str(loras[lora])})
540532
socketio.emit("foundLoras", found_loras)
541533
except Exception as e:
542534
self.handle_exceptions(e)

invokeai/configs/INITIAL_MODELS.yaml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,8 @@ trinart-2.0:
8080
repo_id: stabilityai/sd-vae-ft-mse
8181
recommended: False
8282
waifu-diffusion-1.4:
83-
description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
83+
description: An SD-2.1 model trained on 5.4M anime/manga-style images (4.27 GB)
84+
revision: main
8485
repo_id: hakurei/waifu-diffusion
8586
format: diffusers
8687
vae:

invokeai/frontend/dist/assets/index-f56b39bc.js renamed to invokeai/frontend/dist/assets/index-b12e648e.js

Lines changed: 36 additions & 36 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

invokeai/frontend/dist/index.html

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
66
<title>InvokeAI - A Stable Diffusion Toolkit</title>
77
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
8-
<script type="module" crossorigin src="./assets/index-f56b39bc.js"></script>
8+
<script type="module" crossorigin src="./assets/index-b12e648e.js"></script>
99
<link rel="stylesheet" href="./assets/index-2ab0eb58.css">
1010
</head>
1111

invokeai/frontend/src/app/socketio/listeners.ts

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,10 @@ import {
3333
setIntermediateImage,
3434
} from 'features/gallery/store/gallerySlice';
3535

36+
import {
37+
getLoraModels,
38+
getTextualInversionTriggers,
39+
} from 'app/socketio/actions';
3640
import type { RootState } from 'app/store';
3741
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
3842
import {
@@ -463,6 +467,8 @@ const makeSocketIOListeners = (
463467
const { model_name, model_list } = data;
464468
dispatch(setModelList(model_list));
465469
dispatch(setCurrentStatus(i18n.t('common.statusModelChanged')));
470+
dispatch(getLoraModels());
471+
dispatch(getTextualInversionTriggers());
466472
dispatch(setIsProcessing(false));
467473
dispatch(setIsCancelable(true));
468474
dispatch(

invokeai/frontend/stats.html

Lines changed: 1 addition & 1 deletion
Large diffs are not rendered by default.

ldm/generate.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -633,9 +633,8 @@ def process_image(image,seed):
633633
except RuntimeError:
634634
# Clear the CUDA cache on an exception
635635
self.clear_cuda_cache()
636-
637-
print(traceback.format_exc(), file=sys.stderr)
638-
print(">> Could not generate image.")
636+
print("** Could not generate image.")
637+
raise
639638

640639
toc = time.time()
641640
print("\n>> Usage stats:")

ldm/invoke/CLI.py

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
import sys
55
import traceback
66
from argparse import Namespace
7+
from packaging import version
78
from pathlib import Path
89
from typing import Union
910

@@ -25,6 +26,7 @@
2526
from .globals import Globals, global_config_dir
2627
from .image_util import make_grid
2728
from .log import write_log
29+
from .model_manager import ModelManager
2830
from .pngwriter import PngWriter, retrieve_metadata, write_metadata
2931
from .readline import Completer, get_completer
3032
from ..util import url_attachment_name
@@ -64,6 +66,9 @@ def main():
6466
Globals.sequential_guidance = args.sequential_guidance
6567
Globals.ckpt_convert = True # always true as of 2.3.4 for LoRA support
6668

69+
# run any post-install patches needed
70+
run_patches()
71+
6772
print(f">> Internet connectivity is {Globals.internet_available}")
6873

6974
if not args.conf:
@@ -108,6 +113,9 @@ def main():
108113
if opt.lora_path:
109114
Globals.lora_models_dir = opt.lora_path
110115

116+
# migrate legacy models
117+
ModelManager.migrate_models()
118+
111119
# load the infile as a list of lines
112120
if opt.infile:
113121
try:
@@ -1291,6 +1299,62 @@ def retrieve_last_used_model()->str:
12911299
with open(model_file_path,'r') as f:
12921300
return f.readline()
12931301

1302+
# This routine performs any patch-ups needed after installation
1303+
def run_patches():
1304+
install_missing_config_files()
1305+
version_file = Path(Globals.root,'.version')
1306+
if version_file.exists():
1307+
with open(version_file,'r') as f:
1308+
root_version = version.parse(f.readline() or 'v2.3.2')
1309+
else:
1310+
root_version = version.parse('v2.3.2')
1311+
app_version = version.parse(ldm.invoke.__version__)
1312+
if root_version < app_version:
1313+
try:
1314+
do_version_update(root_version, ldm.invoke.__version__)
1315+
with open(version_file,'w') as f:
1316+
f.write(ldm.invoke.__version__)
1317+
except:
1318+
print("** Update failed. Will try again on next launch")
1319+
1320+
def install_missing_config_files():
1321+
"""
1322+
install ckpt configuration files that may have been added to the
1323+
distro after original root directory configuration
1324+
"""
1325+
import invokeai.configs as conf
1326+
from shutil import copyfile
1327+
1328+
root_configs = Path(global_config_dir(), 'stable-diffusion')
1329+
repo_configs = Path(conf.__path__[0], 'stable-diffusion')
1330+
for src in repo_configs.iterdir():
1331+
dest = root_configs / src.name
1332+
if not dest.exists():
1333+
copyfile(src,dest)
1334+
1335+
def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
1336+
"""
1337+
Make any updates to the launcher .sh and .bat scripts that may be needed
1338+
from release to release. This is not an elegant solution. Instead, the
1339+
launcher should be moved into the source tree and installed using pip.
1340+
"""
1341+
if root_version < version.Version('v2.3.4'):
1342+
dest = Path(Globals.root,'loras')
1343+
dest.mkdir(exist_ok=True)
1344+
if root_version < version.Version('v2.3.3'):
1345+
if sys.platform == "linux":
1346+
print('>> Downloading new version of launcher script and its config file')
1347+
from ldm.util import download_with_progress_bar
1348+
url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'
1349+
1350+
dest = Path(Globals.root,'invoke.sh.in')
1351+
assert download_with_progress_bar(url_base+'invoke.sh.in',dest)
1352+
dest.replace(Path(Globals.root,'invoke.sh'))
1353+
os.chmod(Path(Globals.root,'invoke.sh'), 0o0755)
1354+
1355+
dest = Path(Globals.root,'dialogrc')
1356+
assert download_with_progress_bar(url_base+'dialogrc',dest)
1357+
dest.replace(Path(Globals.root,'.dialogrc'))
12941358

12951359
if __name__ == '__main__':
12961360
main()

ldm/invoke/config/invokeai_configure.py

Lines changed: 3 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
from shutil import get_terminal_size
2222

2323
import npyscreen
24+
import torch
2425
import transformers
2526
from diffusers import AutoencoderKL
2627
from huggingface_hub import HfFolder
@@ -663,19 +664,8 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
663664
configs_src = Path(configs.__path__[0])
664665
configs_dest = Path(root) / "configs"
665666
if not os.path.samefile(configs_src, configs_dest):
666-
shutil.copytree(configs_src,
667-
configs_dest,
668-
dirs_exist_ok=True,
669-
copy_function=shutil.copyfile,
670-
)
671-
# Fix up directory permissions so that they are writable
672-
# This can happen when running under Nix environment which
673-
# makes the runtime directory template immutable.
674-
for root,dirs,files in os.walk(os.path.join(root,name)):
675-
for d in dirs:
676-
Path(root,d).chmod(0o775)
677-
for f in files:
678-
Path(root,d).chmod(0o644)
667+
shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
668+
679669

680670
# -------------------------------------
681671
def run_console_ui(

Comments (0)