
Commit 7ea9951

Lincoln Stein committed
fixes to env parsing, textual inversion & help text
- Make environment variable settings case InSenSiTive: INVOKEAI_MAX_LOADED_MODELS and InvokeAI_Max_Loaded_Models environment variables will both set `max_loaded_models`
- Updated realesrgan to use new config system.
- Updated textual_inversion_training to use new config system.
- Discovered a race condition when InvokeAIAppConfig is created at module load time, which makes it impossible to customize or replace the help message produced with --help on the command line. To fix this, moved all instances of get_invokeai_config() from module load time to object initialization time. Makes code cleaner, too.
- Added `--from_file` argument to `invokeai-node-cli` and changed github action to match. CI tests will hopefully work now.
1 parent f9710dd commit 7ea9951
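
The race-condition fix is the through-line of this commit: every module that grabbed the config as an import side effect now fetches it when the consuming object is initialized, as the patchmatch, txt2mask, convert_ckpt_to_diffusers, and model_manager diffs below show. A minimal before/after sketch of the pattern (get_invokeai_config() is the real accessor; the class around it is illustrative):

from invokeai.app.services.config import get_invokeai_config

# Before: the singleton was built at import time, before the CLI finished
# wiring up argument parsing, so --help text and later overrides could
# never reach it:
#
#     config = get_invokeai_config()   # module scope
#
# After: each consumer resolves the singleton when it actually runs.
class SomeBackendComponent:                   # illustrative, not a class in the diff
    def __init__(self):
        self.config = get_invokeai_config()   # resolved at object init time

    def cache_location(self):
        return self.config.cache_dir          # sees CLI and env overrides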

22 files changed (+118, -116)


.github/workflows/test-invoke-pip.yml

Lines changed: 2 additions & 7 deletions
@@ -80,12 +80,7 @@ jobs:
         uses: actions/checkout@v3

       - name: set test prompt to main branch validation
-        if: ${{ github.ref == 'refs/heads/main' }}
-        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
-
-      - name: set test prompt to Pull Request validation
-        if: ${{ github.ref != 'refs/heads/main' }}
-        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
+        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

       - name: setup python
         uses: actions/setup-python@v4
@@ -131,7 +126,7 @@ jobs:
           --precision=float32
           --always_use_cpu
           --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
-          < ${{ env.TEST_PROMPTS }}
+          --from_file ${{ env.TEST_PROMPTS }}

       - name: Archive results
         id: archive-results

invokeai/app/cli_app.py

Lines changed: 6 additions & 0 deletions
@@ -4,6 +4,7 @@
 import os
 import re
 import shlex
+import sys
 import time
 from typing import (
     Union,
@@ -195,6 +196,11 @@ def invoke_cli():
     parser = config.get_parser()
     parser.add_argument('commands',nargs='*')
    invocation_commands = parser.parse_args().commands
+
+    # get the optional file to read commands from.
+    # Simplest is to use it for STDIN
+    if infile := config.from_file:
+        sys.stdin = open(infile,"r")

     model_manager = get_model_manager(config,logger=logger)
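
The new from_file setting lets the node CLI read its commands from a file instead of an interactive terminal, which is what the workflow change above switches to. A sketch of why rebinding sys.stdin is all it takes (the file name is hypothetical):

import sys

# Equivalent in spirit to `invokeai-node-cli --from_file commands.txt`:
# once sys.stdin is rebound, any downstream code that reads standard
# input (input(), readline(), iteration) consumes the file instead.
sys.stdin = open("commands.txt", "r")   # "commands.txt" is an illustrative name

for command in sys.stdin:               # the existing REPL loop needs no changes
    print(f"would execute: {command.strip()}")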

invokeai/app/services/config.py

Lines changed: 14 additions & 5 deletions
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein)
+# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team

 '''Invokeai configuration system.

@@ -206,8 +206,16 @@ def add_parser_arguments(cls, parser):
             if cls.initconf and settings_stanza in cls.initconf \
             else OmegaConf.create()

+        # create an upcase version of the environment in
+        # order to achieve case-insensitive environment
+        # variables (the way Windows does)
+        upcase_environ = dict()
+        for key,value in os.environ.items():
+            upcase_environ[key.upper()] = value
+
         fields = cls.__fields__
         cls.argparse_groups = {}
+
         for name, field in fields.items():
             if name not in cls._excluded():
                 current_default = field.default
@@ -216,8 +224,8 @@
                 env_name = env_prefix + '_' + name
                 if category in initconf and name in initconf.get(category):
                     field.default = initconf.get(category).get(name)
-                if env_name in os.environ:
-                    field.default = os.environ[env_name]
+                if env_name.upper() in upcase_environ:
+                    field.default = upcase_environ[env_name.upper()]
                 cls.add_field_argument(parser, name, field)

                 field.default = current_default
@@ -353,6 +361,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths')
     lora_dir : Path = Field(default='loras', description='Path to InvokeAI LoRA model directory', category='Paths')
     outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
+    from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')

     model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
     embeddings : bool = Field(default=True, description='Load contents of embeddings directory', category='Models')
@@ -502,11 +511,11 @@ def print_help(self, file=None):
         text = self.format_help()
         pydoc.pager(text)

-def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig)->InvokeAISettings:
+def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAISettings:
     '''
     This returns a singleton InvokeAIAppConfig configuration object.
     '''
     global global_config
     if global_config is None or type(global_config)!=cls:
-        global_config = cls()
+        global_config = cls(**kwargs)
     return global_config
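
The case-insensitive behavior comes from upcasing the process environment once and probing it with the upcased setting name. A standalone sketch of the technique (the INVOKEAI prefix and max_loaded_models name mirror the diff; the helper itself is illustrative):

import os

def env_override(name: str, prefix: str = "INVOKEAI") -> str | None:
    """Look up <PREFIX>_<NAME> in the environment, matching the
    variable name case-insensitively (the way Windows does)."""
    upcase_environ = {key.upper(): value for key, value in os.environ.items()}
    return upcase_environ.get(f"{prefix}_{name}".upper())

os.environ["InvokeAI_Max_Loaded_Models"] = "4"
print(env_override("max_loaded_models"))   # -> '4'
print(env_override("MAX_LOADED_MODELS"))   # -> '4', same setting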

invokeai/backend/config/invokeai_configure.py

Lines changed: 5 additions & 2 deletions
@@ -389,8 +389,8 @@ def create(self):
         )
         self.nextrely += 1
         for i in [
-            "If you have an account at HuggingFace you may paste your access token here",
-            'to allow InvokeAI to download styles & subjects from the "Concept Library".',
+            "If you have an account at HuggingFace you may optionally paste your access token here",
+            'to allow InvokeAI to download restricted styles & subjects from the "Concept Library".',
             "See https://huggingface.co/settings/tokens",
         ]:
             self.add_widget_intelligent(
@@ -593,6 +593,9 @@ def marshall_arguments(self):
         new_opts.hf_token = self.hf_token.value
         new_opts.license_acceptance = self.license_acceptance.value
         new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]
+
+        # widget library workaround to make max_loaded_models an int rather than a float
+        new_opts.max_loaded_models = int(new_opts.max_loaded_models)

         return new_opts
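
Per the comment in the diff, the form's slider widget hands the value back as a float even though the setting is an integer, so it is coerced before being saved. In miniature (values illustrative):

max_loaded_models = 2.0                       # what the slider widget returns
max_loaded_models = int(max_loaded_models)    # 2, matching the int-typed setting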

invokeai/backend/image_util/patchmatch.py

Lines changed: 1 addition & 2 deletions
@@ -8,8 +8,6 @@
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config

-config = get_invokeai_config()
-
 class PatchMatch:
     """
     Thin class wrapper around the patchmatch function.
@@ -23,6 +21,7 @@ def __init__(self):

     @classmethod
     def _load_patch_match(self):
+        config = get_invokeai_config()
         if self.tried_load:
             return
         if config.try_patchmatch:

invokeai/backend/image_util/txt2mask.py

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,6 @@

 CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
 CLIPSEG_SIZE = 352
-config = get_invokeai_config()

 class SegmentedGrayscale(object):
     def __init__(self, image: Image, heatmap: torch.Tensor):
@@ -84,6 +83,7 @@ class Txt2Mask(object):

     def __init__(self, device="cpu", refined=False):
         logger.info("Initializing clipseg model for text to mask inference")
+        config = get_invokeai_config()

         # BUG: we are not doing anything with the device option at this time
         self.device = device

invokeai/backend/model_management/convert_ckpt_to_diffusers.py

Lines changed: 4 additions & 6 deletions
@@ -74,8 +74,6 @@

 from ..stable_diffusion import StableDiffusionGeneratorPipeline

-config = get_invokeai_config()
-
 def shave_segments(path, n_shave_prefix_segments=1):
     """
     Removes segments. Positive values shave the first segments, negative shave the last segments.
@@ -844,7 +842,7 @@ def _copy_layers(hf_layers, pt_layers):

 def convert_ldm_clip_checkpoint(checkpoint):
     text_model = CLIPTextModel.from_pretrained(
-        "openai/clip-vit-large-patch14", cache_dir=config.cache_dir
+        "openai/clip-vit-large-patch14", cache_dir=get_invokeai_config().cache_dir
     )

     keys = list(checkpoint.keys())
@@ -899,7 +897,7 @@ def convert_ldm_clip_checkpoint(checkpoint):


 def convert_paint_by_example_checkpoint(checkpoint):
-    cache_dir = config.cache_dir
+    cache_dir = get_invokeai_config().cache_dir
     config = CLIPVisionConfig.from_pretrained(
         "openai/clip-vit-large-patch14", cache_dir=cache_dir
     )
@@ -971,7 +969,7 @@ def convert_paint_by_example_checkpoint(checkpoint):


 def convert_open_clip_checkpoint(checkpoint):
-    cache_dir = config.cache_dir
+    cache_dir = get_invokeai_config().cache_dir
     text_model = CLIPTextModel.from_pretrained(
         "stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir
     )
@@ -1094,7 +1092,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     :param vae: A diffusers VAE to load into the pipeline.
     :param vae_path: Path to a checkpoint VAE that will be converted into diffusers and loaded into the pipeline.
     """
-
+    config = get_invokeai_config()
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         verbosity = dlogging.get_verbosity()
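
A subtlety in convert_paint_by_example_checkpoint() makes the call-time fetch more than a style change: the function reassigns the name config locally (to a CLIPVisionConfig), and Python treats a name assigned anywhere in a function as local throughout, so reading a module-level config on the first line would raise UnboundLocalError. A minimal sketch of the pitfall (names and values illustrative):

_settings = {"cache_dir": "~/invokeai/models/hub"}   # stand-in for the app config

def get_config():
    return _settings

config = get_config()   # module-level capture, as in the old code

def broken(checkpoint_cfg):
    cache_dir = config["cache_dir"]   # UnboundLocalError: 'config' is assigned
    config = checkpoint_cfg           # below, so it is local to the whole function
    return cache_dir, config

def fixed(checkpoint_cfg):
    cache_dir = get_config()["cache_dir"]   # call-time fetch, as in the commit
    config = checkpoint_cfg                 # rebinding a local is now harmless
    return cache_dir, config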

invokeai/backend/model_management/model_manager.py

Lines changed: 26 additions & 32 deletions
@@ -68,7 +68,6 @@ class SDModelComponent(Enum):
     feature_extractor="feature_extractor"

 DEFAULT_MAX_MODELS = 2
-config = get_invokeai_config()

 class ModelManager(object):
     """
@@ -99,6 +98,7 @@ def __init__(
         if not isinstance(config, DictConfig):
             config = OmegaConf.load(config)
         self.config = config
+        self.globals = get_invokeai_config()
         self.precision = precision
         self.device = torch.device(device_type)
         self.max_loaded_models = max_loaded_models
@@ -291,7 +291,7 @@ def is_legacy(self, model_name: str) -> bool:
         """
         # if we are converting legacy files automatically, then
         # there are no legacy ckpts!
-        if config.ckpt_convert:
+        if self.globals.ckpt_convert:
             return False
         info = self.model_info(model_name)
         if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")):
@@ -501,13 +501,13 @@ def _load_diffusers_model(self, mconfig):

         # TODO: scan weights maybe?
         pipeline_args: dict[str, Any] = dict(
-            safety_checker=None, local_files_only=not config.internet_available
+            safety_checker=None, local_files_only=not self.globals.internet_available
         )
         if "vae" in mconfig and mconfig["vae"] is not None:
             if vae := self._load_vae(mconfig["vae"]):
                 pipeline_args.update(vae=vae)
         if not isinstance(name_or_path, Path):
-            pipeline_args.update(cache_dir=config.cache_dir)
+            pipeline_args.update(cache_dir=self.globals.cache_dir)
         if using_fp16:
             pipeline_args.update(torch_dtype=torch.float16)
             fp_args_list = [{"revision": "fp16"}, {}]
@@ -559,10 +559,9 @@ def _load_ckpt_model(self, model_name, mconfig):
         width = mconfig.width
         height = mconfig.height

-        if not os.path.isabs(config):
-            config = os.path.join(config.root, config)
-        if not os.path.isabs(weights):
-            weights = os.path.normpath(os.path.join(config.root, weights))
+        root_dir = self.globals.root_dir
+        config = str(root_dir / config)
+        weights = str(root_dir / weights)

         # Convert to diffusers and return a diffusers pipeline
         self.logger.info(f"Converting legacy checkpoint {model_name} into a diffusers model...")
@@ -577,11 +576,7 @@

         vae_path = None
         if vae:
-            vae_path = (
-                vae
-                if os.path.isabs(vae)
-                else os.path.normpath(os.path.join(config.root, vae))
-            )
+            vae_path = str(root_dir / vae)
         if self._has_cuda():
             torch.cuda.empty_cache()
         pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
@@ -613,9 +608,7 @@ def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path:
         )

         if "path" in mconfig and mconfig["path"] is not None:
-            path = Path(mconfig["path"])
-            if not path.is_absolute():
-                path = Path(config.root, path).resolve()
+            path = self.globals.root_dir / Path(mconfig["path"])
             return path
         elif "repo_id" in mconfig:
             return mconfig["repo_id"]
@@ -863,16 +856,16 @@ def heuristic_import(
             model_type = self.probe_model_type(checkpoint)
             if model_type == SDLegacyType.V1:
                 self.logger.debug("SD-v1 model detected")
-                model_config_file = config.legacy_conf_path / "v1-inference.yaml"
+                model_config_file = self.globals.legacy_conf_path / "v1-inference.yaml"
             elif model_type == SDLegacyType.V1_INPAINT:
                 self.logger.debug("SD-v1 inpainting model detected")
-                model_config_file = config.legacy_conf_path / "v1-inpainting-inference.yaml",
+                model_config_file = self.globals.legacy_conf_path / "v1-inpainting-inference.yaml",
             elif model_type == SDLegacyType.V2_v:
                 self.logger.debug("SD-v2-v model detected")
-                model_config_file = config.legacy_conf_path / "v2-inference-v.yaml"
+                model_config_file = self.globals.legacy_conf_path / "v2-inference-v.yaml"
             elif model_type == SDLegacyType.V2_e:
                 self.logger.debug("SD-v2-e model detected")
-                model_config_file = config.legacy_conf_path / "v2-inference.yaml"
+                model_config_file = self.globals.legacy_conf_path / "v2-inference.yaml"
             elif model_type == SDLegacyType.V2:
                 self.logger.warning(
                     f"{thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
@@ -899,7 +892,7 @@ def heuristic_import(
                 self.logger.debug(f"Using VAE file {vae_path.name}")
             vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")

-            diffuser_path = config.root / "models/converted_ckpts" / model_path.stem
+            diffuser_path = self.globals.root_dir / "models/converted_ckpts" / model_path.stem
             model_name = self.convert_and_import(
                 model_path,
                 diffusers_path=diffuser_path,
@@ -1032,7 +1025,7 @@ def commit(self, config_file_path: str) -> None:
         """
         yaml_str = OmegaConf.to_yaml(self.config)
         if not os.path.isabs(config_file_path):
-            config_file_path = config.model_conf_path
+            config_file_path = self.globals.model_conf_path
         tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp")
         with open(tmpfile, "w", encoding="utf-8") as outfile:
             outfile.write(self.preamble())
@@ -1064,7 +1057,8 @@ def migrate_models(cls):
         """
         # Three transformer models to check: bert, clip and safety checker, and
         # the diffusers as well
-        models_dir = config.root / "models"
+        config = get_invokeai_config()
+        models_dir = config.root_dir / "models"
         legacy_locations = [
             Path(
                 models_dir,
@@ -1138,13 +1132,12 @@ def _resolve_path(
         if str(source).startswith(("http:", "https:", "ftp:")):
             dest_directory = Path(dest_directory)
             if not dest_directory.is_absolute():
-                dest_directory = config.root / dest_directory
+                dest_directory = self.globals.root_dir / dest_directory
             dest_directory.mkdir(parents=True, exist_ok=True)
             resolved_path = download_with_resume(str(source), dest_directory)
         else:
-            if not os.path.isabs(source):
-                source = config.root / source
-            resolved_path = Path(source)
+            source = self.globals.root_dir / source
+            resolved_path = source
         return resolved_path

     def _invalidate_cached_model(self, model_name: str) -> None:
@@ -1194,7 +1187,7 @@ def _diffuser_sha256(
             path = name_or_path
         else:
             owner, repo = name_or_path.split("/")
-            path = Path(config.cache_dir / f"models--{owner}--{repo}")
+            path = self.globals.cache_dir / f"models--{owner}--{repo}"
         if not path.exists():
             return None
         hashpath = path / "checksum.sha256"
@@ -1255,8 +1248,8 @@ def _load_vae(self, vae_config) -> AutoencoderKL:
         using_fp16 = self.precision == "float16"

         vae_args.update(
-            cache_dir=config.cache_dir,
-            local_files_only=not config.internet_available,
+            cache_dir=self.globals.cache_dir,
+            local_files_only=not self.globals.internet_available,
         )

         self.logger.debug(f"Loading diffusers VAE from {name_or_path}")
@@ -1294,7 +1287,7 @@ def _load_vae(self, vae_config) -> AutoencoderKL:

     @classmethod
     def _delete_model_from_cache(cls,repo_id):
-        cache_info = scan_cache_dir(config.cache_dir)
+        cache_info = scan_cache_dir(get_invokeai_config().cache_dir)

         # I'm sure there is a way to do this with comprehensions
         # but the code quickly became incomprehensible!
@@ -1311,9 +1304,10 @@ def _delete_model_from_cache(cls,repo_id):

     @staticmethod
     def _abs_path(path: str | Path) -> Path:
+        globals = get_invokeai_config()
         if path is None or Path(path).is_absolute():
             return path
-        return Path(config.root, path).resolve()
+        return Path(globals.root_dir, path).resolve()

     @staticmethod
     def _is_huggingface_hub_directory_present() -> bool:
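
Most of the path changes above drop explicit os.path.isabs() checks. That is safe because pathlib's / operator keeps an absolute right-hand operand as-is, so joining against root_dir is a no-op for paths that are already absolute. A quick sketch (paths hypothetical):

from pathlib import Path

root = Path("/opt/invokeai")                # hypothetical root_dir
print(root / "configs/v1-inference.yaml")   # /opt/invokeai/configs/v1-inference.yaml
print(root / "/tmp/custom-vae.ckpt")        # /tmp/custom-vae.ckpt (absolute operand wins)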
