Commit 9c2f325

use diffusers 0.14 cache layout, upgrade transformers, safetensors, accelerate (#2913)
This PR ports the `main` PR #2871 to the v2.3 branch. It adjusts the global diffusers model cache to match the diffusers 0.14 layout, which places models in HF_HOME/hub rather than HF_HOME/diffusers, and implements the one-time migration to the new layout.
2 parents 8323169 + acf955f commit 9c2f325
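
For orientation, a minimal sketch of the layout change this commit adapts to. The paths and the default root below are illustrative assumptions, not values read from the code:

```python
import os
from pathlib import Path

# Illustration only: where diffusers models land before and after 0.14,
# assuming HF_HOME points at an InvokeAI-managed models directory.
hf_home = Path(os.environ.get("HF_HOME", "~/invokeai/models")).expanduser()

old_location = hf_home / "diffusers" / "models--stabilityai--sd-vae-ft-mse"  # diffusers <= 0.13
new_location = hf_home / "hub" / "models--stabilityai--sd-vae-ft-mse"        # diffusers >= 0.14
print(f"{old_location} -> {new_location}")
```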

File tree

9 files changed: +44 −50 lines


ldm/invoke/CLI.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -960,7 +960,6 @@ def prepare_image_metadata(
     wildcards["seed"] = seed
     wildcards["model_id"] = model_id
     try:
-        print(f'DEBUG: fnformat={opt.fnformat}')
         filename = opt.fnformat.format(**wildcards)
     except KeyError as e:
         print(
```

ldm/invoke/_version.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -1 +1 @@
-__version__='2.3.1.post2'
+__version__='2.3.2'
```

ldm/invoke/config/invokeai_configure.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -290,7 +290,7 @@ def download_vaes():
     # first the diffusers version
     repo_id = "stabilityai/sd-vae-ft-mse"
     args = dict(
-        cache_dir=global_cache_dir("diffusers"),
+        cache_dir=global_cache_dir("hub"),
     )
     if not AutoencoderKL.from_pretrained(repo_id, **args):
         raise Exception(f"download of {repo_id} failed")
```
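
For context, the call pattern above can be exercised standalone. A hedged sketch; the cache path is an assumed default rather than whatever `global_cache_dir("hub")` returns on a given install:

```python
from pathlib import Path

from diffusers import AutoencoderKL

# Downloads (or loads from cache) the VAE into the 0.14-style "hub" folder.
cache_dir = Path("~/invokeai/models/hub").expanduser()  # assumed default root
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", cache_dir=cache_dir)
print(type(vae).__name__)  # AutoencoderKL
```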

ldm/invoke/config/model_install_backend.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -262,7 +262,6 @@ def _download_diffusion_weights(
     path = download_from_hf(
         model_class,
         repo_id,
-        cache_subdir="diffusers",
         safety_checker=None,
         **extra_args,
     )
```
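
With `cache_subdir` dropped, the helper relies on the single hub location. A hypothetical sketch of such a wrapper (not the repo's actual `download_from_hf`):

```python
from ldm.invoke.globals import global_cache_dir

# Hypothetical wrapper: with cache_subdir gone, everything caches under
# global_cache_dir("hub"), matching the diffusers 0.14 layout.
def download_from_hf_sketch(model_class, repo_id: str, **kwargs):
    return model_class.from_pretrained(
        repo_id,
        cache_dir=global_cache_dir("hub"),
        resume_download=True,
        **kwargs,
    )
```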

ldm/invoke/globals.py

Lines changed: 6 additions & 9 deletions
```diff
@@ -88,16 +88,13 @@ def global_cache_dir(subdir:Union[str,Path]='')->Path:
     '''
     Returns Path to the model cache directory. If a subdirectory
     is provided, it will be appended to the end of the path, allowing
-    for huggingface-style conventions:
-         global_cache_dir('diffusers')
+    for Hugging Face-style conventions. Currently, Hugging Face has
+    moved all models into the "hub" subfolder, so for any pretrained
+    HF model, use:
          global_cache_dir('hub')
-    Current HuggingFace documentation (mid-Jan 2023) indicates that
-    transformers models will be cached into a "transformers" subdirectory,
-    but in practice they seem to go into "hub". But if needed:
-         global_cache_dir('transformers')
-    One other caveat is that HuggingFace is moving some diffusers models
-    into the "hub" subdirectory as well, so this will need to be revisited
-    from time to time.
+
+    The legacy locations were global_cache_dir('transformers') for
+    transformers models and global_cache_dir('diffusers') for diffusers.
     '''
     home: str = os.getenv('HF_HOME')
```
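
As a rough illustration of the convention the docstring now describes, a simplified sketch of the resolution logic (the real `global_cache_dir` also falls back to the InvokeAI root when HF_HOME is unset; the default shown here is an assumption):

```python
import os
from pathlib import Path
from typing import Union

def global_cache_dir_sketch(subdir: Union[str, Path] = "") -> Path:
    # HF_HOME wins if set; otherwise fall back to an assumed InvokeAI default.
    home = os.getenv("HF_HOME") or "~/invokeai/models"
    return Path(home).expanduser() / subdir

print(global_cache_dir_sketch("hub"))  # e.g. /home/user/invokeai/models/hub
```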

ldm/invoke/merge_diffusers.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -437,10 +437,10 @@ def main():
     args = _parse_args()
     global_set_root(args.root_dir)

-    cache_dir = str(global_cache_dir("diffusers"))
+    cache_dir = str(global_cache_dir("hub"))
     os.environ[
         "HF_HOME"
-    ] = cache_dir  # because it is not clear the merge pipeline honors cache_dir
+    ] = str(global_cache_dir())  # because it is not clear the merge pipeline honors cache_dir
     args.cache_dir = cache_dir

     try:
```
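
The split above is deliberate: huggingface_hub derives its cache location as HF_HOME/hub, so HF_HOME must point at the parent of "hub", not at "hub" itself. A brief sketch:

```python
import os
from ldm.invoke.globals import global_cache_dir

# huggingface_hub computes its cache as $HF_HOME/hub, so HF_HOME must be
# the parent of "hub"; pointing it at .../hub would resolve to .../hub/hub.
cache_dir = str(global_cache_dir("hub"))         # e.g. .../models/hub
os.environ["HF_HOME"] = str(global_cache_dir())  # e.g. .../models
```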

ldm/invoke/model_manager.py

Lines changed: 29 additions & 30 deletions
```diff
@@ -507,7 +507,7 @@ def _load_diffusers_model(self, mconfig):
         if vae := self._load_vae(mconfig["vae"]):
             pipeline_args.update(vae=vae)
         if not isinstance(name_or_path, Path):
-            pipeline_args.update(cache_dir=global_cache_dir("diffusers"))
+            pipeline_args.update(cache_dir=global_cache_dir("hub"))
         if using_fp16:
             pipeline_args.update(torch_dtype=torch.float16)
             fp_args_list = [{"revision": "fp16"}, {}]
@@ -1093,27 +1093,39 @@ def migrate_models(cls):
         to the 2.3.0 "diffusers" version. This should be a one-time operation, called at
         script startup time.
         """
-        # Three transformer models to check: bert, clip and safety checker
+        # Three transformer models to check: bert, clip and safety checker,
+        # plus the diffusers models
+        models_dir = Path(Globals.root, "models")
         legacy_locations = [
             Path(
+                models_dir,
                 "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker"
             ),
             Path("bert-base-uncased/models--bert-base-uncased"),
             Path(
                 "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
             ),
         ]
-        models_dir = Path(Globals.root, "models")
+        legacy_locations.extend(list(global_cache_dir("diffusers").glob('*')))
         legacy_layout = False
         for model in legacy_locations:
-            legacy_layout = legacy_layout or Path(models_dir, model).exists()
+            legacy_layout = legacy_layout or model.exists()
         if not legacy_layout:
             return

         print(
-            "** Legacy version <= 2.2.5 model directory layout detected. Reorganizing."
+            """
+>> ALERT:
+>> The location of your previously-installed diffusers models needs to move from
+>> invokeai/models/diffusers to invokeai/models/hub due to a change introduced by
+>> diffusers version 0.14. InvokeAI will now move all models from the "diffusers"
+>> directory into "hub" and then remove the "diffusers" directory. This is a quick,
+>> safe, one-time operation. However, if you have customized either of these
+>> directories and need to make adjustments, please press ctrl-C now to abort and
+>> relaunch InvokeAI when you are ready. Otherwise press <enter> to continue."""
         )
         print("** This is a quick one-time operation.")
+        input("continue> ")

         # transformer files get moved into the hub directory
         if cls._is_huggingface_hub_directory_present():
@@ -1125,33 +1137,20 @@ def migrate_models(cls):
         for model in legacy_locations:
             source = models_dir / model
             dest = hub / model.stem
+            if dest.exists() and not source.exists():
+                continue
             print(f"** {source} => {dest}")
             if source.exists():
-                if dest.exists():
-                    rmtree(source)
+                if dest.is_symlink():
+                    print(f"** Found symlink at {dest.name}. Not migrating.")
+                elif dest.exists():
+                    if source.is_dir():
+                        rmtree(source)
+                    else:
+                        source.unlink()
                 else:
                     move(source, dest)
-
-        # anything else gets moved into the diffusers directory
-        if cls._is_huggingface_hub_directory_present():
-            diffusers = global_cache_dir("diffusers")
-        else:
-            diffusers = models_dir / "diffusers"
-
-        os.makedirs(diffusers, exist_ok=True)
-        for root, dirs, _ in os.walk(models_dir, topdown=False):
-            for dir in dirs:
-                full_path = Path(root, dir)
-                if full_path.is_relative_to(hub) or full_path.is_relative_to(diffusers):
-                    continue
-                if Path(dir).match("models--*--*"):
-                    dest = diffusers / dir
-                    print(f"** {full_path} => {dest}")
-                    if dest.exists():
-                        rmtree(full_path)
-                    else:
-                        move(full_path, dest)
-
+
         # now clean up by removing any empty directories
         empty = [
             root
@@ -1249,7 +1248,7 @@ def _diffuser_sha256(
             path = name_or_path
         else:
             owner, repo = name_or_path.split("/")
-            path = Path(global_cache_dir("diffusers") / f"models--{owner}--{repo}")
+            path = Path(global_cache_dir("hub") / f"models--{owner}--{repo}")
         if not path.exists():
             return None
         hashpath = path / "checksum.sha256"
@@ -1310,7 +1309,7 @@ def _load_vae(self, vae_config) -> AutoencoderKL:
         using_fp16 = self.precision == "float16"

         vae_args.update(
-            cache_dir=global_cache_dir("diffusers"),
+            cache_dir=global_cache_dir("hub"),
             local_files_only=not Globals.internet_available,
         )
```
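
Taken together, the migration hunks reduce to roughly the following. This is a condensed sketch with assumed paths, not the actual `migrate_models` method:

```python
from pathlib import Path
from shutil import move, rmtree

# Assumed locations; the real code derives them from Globals.root and
# global_cache_dir().
diffusers_dir = Path("~/invokeai/models/diffusers").expanduser()
hub_dir = Path("~/invokeai/models/hub").expanduser()
hub_dir.mkdir(parents=True, exist_ok=True)

if diffusers_dir.is_dir():
    for source in diffusers_dir.glob("*"):
        dest = hub_dir / source.name
        if dest.is_symlink():
            print(f"** Found symlink at {dest.name}. Not migrating.")
        elif dest.exists():
            # Destination already populated: discard the stale legacy copy.
            if source.is_dir():
                rmtree(source)
            else:
                source.unlink()
        else:
            move(str(source), str(dest))
```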

ldm/invoke/training/textual_inversion_training.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -634,7 +634,7 @@ def do_textual_inversion_training(
     assert (
         pretrained_model_name_or_path
     ), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}"
-    pipeline_args = dict(cache_dir=global_cache_dir("diffusers"))
+    pipeline_args = dict(cache_dir=global_cache_dir("hub"))

     # Load tokenizer
     if tokenizer_name:
```

pyproject.toml

Lines changed: 4 additions & 4 deletions
```diff
@@ -28,13 +28,13 @@ classifiers = [
     "Topic :: Scientific/Engineering :: Image Processing",
 ]
 dependencies = [
-    "accelerate",
+    "accelerate~=0.16",
     "albumentations",
     "click",
     "clip_anytorch",
     "compel==0.1.7",
     "datasets",
-    "diffusers[torch]~=0.13",
+    "diffusers[torch]~=0.14",
     "dnspython==2.2.1",
     "einops",
     "eventlet",
@@ -63,7 +63,7 @@ dependencies = [
     "pytorch-lightning==1.7.7",
     "realesrgan",
     "requests==2.28.2",
-    "safetensors",
+    "safetensors~=0.3.0",
     "scikit-image>=0.19",
     "send2trash",
     "streamlit",
@@ -73,7 +73,7 @@ dependencies = [
     "torch>=1.13.1",
     "torchmetrics",
     "torchvision>=0.14.1",
-    "transformers~=4.25",
+    "transformers~=4.26",
     "windows-curses; sys_platform=='win32'",
 ]
 description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
```
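
For readers unfamiliar with the `~=` pins added above, the compatible-release operator can be checked with the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

# "~=0.14" is shorthand for ">=0.14, ==0.*": new enough for the hub cache
# layout, but held below the next major series.
spec = SpecifierSet("~=0.14")
print("0.14.1" in spec)  # True
print("0.15.0" in spec)  # True
print("1.0.0" in spec)   # False
```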
