
Commit 0060754

Author: Lincoln Stein (committed)
Merge branch 'main' into release/invokeai-3-0-1
2 parents 52bd29d + 41b13e8

9 files changed: +171 / -155 lines

README.md
Lines changed: 1 addition & 1 deletion

@@ -123,7 +123,7 @@ and go to http://localhost:9090.
 
 ### Command-Line Installation (for developers and users familiar with Terminals)
 
-You must have Python 3.9 or 3.10 installed on your machine. Earlier or
+You must have Python 3.9 through 3.11 installed on your machine. Earlier or
 later versions are not supported.
 Node.js also needs to be installed along with yarn (can be installed with
 the command `npm install -g yarn` if needed)

docs/installation/010_INSTALL_AUTOMATED.md
Lines changed: 2 additions & 4 deletions

@@ -40,10 +40,8 @@ experimental versions later.
 this, open up a command-line window ("Terminal" on Linux and
 Macintosh, "Command" or "Powershell" on Windows) and type `python
 --version`. If Python is installed, it will print out the version
-number. If it is version `3.9.*` or `3.10.*`, you meet
-requirements. We do not recommend using Python 3.11 or higher,
-as not all the libraries that InvokeAI depends on work properly
-with this version.
+number. If it is version `3.9.*`, `3.10.*` or `3.11.*` you meet
+requirements.
 
 !!! warning "What to do if you have an unsupported version"
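For reference, the acceptance range described above can also be checked from inside Python itself. The following is a minimal sketch, not part of this commit or the InvokeAI docs, that mirrors the documented 3.9 through 3.11 requirement:

```python
import sys

# Supported range per the updated documentation: 3.9.* through 3.11.*
MIN_SUPPORTED = (3, 9)
MAX_SUPPORTED = (3, 11)

if MIN_SUPPORTED <= sys.version_info[:2] <= MAX_SUPPORTED:
    print(f"Python {sys.version.split()[0]} meets the requirements")
else:
    print(f"Python {sys.version.split()[0]} is not supported; install 3.9, 3.10 or 3.11")
```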

docs/installation/020_INSTALL_MANUAL.md
Lines changed: 2 additions & 2 deletions

@@ -32,7 +32,7 @@ gaming):
 
 * **Python**
 
-    version 3.9 or 3.10 (3.11 is not recommended).
+    version 3.9 through 3.11
 
 * **CUDA Tools**
 

@@ -65,7 +65,7 @@ gaming):
 To install InvokeAI with virtual environments and the PIP package
 manager, please follow these steps:
 
-1. Please make sure you are using Python 3.9 or 3.10. The rest of the install
+1. Please make sure you are using Python 3.9 through 3.11. The rest of the install
    procedure depends on this and will not work with other versions:
 
    ```bash

installer/install.sh.in
Lines changed: 10 additions & 6 deletions

@@ -9,16 +9,20 @@ cd $scriptdir
 function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
 
 MINIMUM_PYTHON_VERSION=3.9.0
-MAXIMUM_PYTHON_VERSION=3.11.0
+MAXIMUM_PYTHON_VERSION=3.11.100
 PYTHON=""
-for candidate in python3.10 python3.9 python3 python ; do
+for candidate in python3.11 python3.10 python3.9 python3 python ; do
     if ppath=`which $candidate`; then
+        # when using `pyenv`, the executable for an inactive Python version will exist but will not be operational
+        # we check that this found executable can actually run
+        if [ $($candidate --version &>/dev/null; echo ${PIPESTATUS}) -gt 0 ]; then continue; fi
+
         python_version=$($ppath -V | awk '{ print $2 }')
         if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
-            if [ $(version $python_version) -lt $(version "$MAXIMUM_PYTHON_VERSION") ]; then
-                PYTHON=$ppath
-                break
-            fi
+            if [ $(version $python_version) -le $(version "$MAXIMUM_PYTHON_VERSION") ]; then
+                PYTHON=$ppath
+                break
+            fi
         fi
     fi
 done
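The `version` helper above packs each dotted component into a fixed-width integer (awk's `printf("%d%03d%03d%03d")`), so versions can be compared numerically. That is why the ceiling moves from `3.11.0` with `-lt` to `3.11.100` with `-le`: any 3.11.x patch release now passes the check. A rough Python sketch of the same packing scheme, for illustration only (the real check is the shell code above):

```python
def pack(version: str) -> int:
    """Pack up to four dotted components into one comparable integer,
    mirroring the installer's awk printf("%d%03d%03d%03d") helper."""
    parts = (version.split(".") + ["0", "0", "0", "0"])[:4]
    packed = 0
    for part in parts:
        packed = packed * 1000 + int(part)
    return packed

MINIMUM = pack("3.9.0")
MAXIMUM = pack("3.11.100")  # raised ceiling: any 3.11.x interpreter is accepted

for candidate in ("3.8.10", "3.10.6", "3.11.4", "3.12.0"):
    ok = MINIMUM <= pack(candidate) <= MAXIMUM
    print(candidate, "accepted" if ok else "rejected")
```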

invokeai/app/api/routers/models.py
Lines changed: 1 addition & 1 deletion

@@ -90,7 +90,7 @@ async def update_model(
             new_name=info.model_name,
             new_base=info.base_model,
         )
-        logger.info(f"Successfully renamed {base_model}/{model_name}=>{info.base_model}/{info.model_name}")
+        logger.info(f"Successfully renamed {base_model.value}/{model_name}=>{info.base_model}/{info.model_name}")
         # update information to support an update of attributes
         model_name = info.model_name
         base_model = info.base_model

invokeai/app/invocations/latent.py
Lines changed: 120 additions & 118 deletions

@@ -12,7 +12,7 @@
 
 from invokeai.app.invocations.metadata import CoreMetadata
 from invokeai.app.util.step_callback import stable_diffusion_step_callback
-from invokeai.backend.model_management.models.base import ModelType
+from invokeai.backend.model_management.models import ModelType, SilenceWarnings
 
 from ...backend.model_management.lora import ModelPatcher
 from ...backend.stable_diffusion import PipelineIntermediateState

@@ -311,70 +311,71 @@ def prep_control_data(
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
-        noise = context.services.latents.get(self.noise.latents_name)
-
-        # Get the source node id (we are invoking the prepared node)
-        graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
-        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
-
-        def step_callback(state: PipelineIntermediateState):
-            self.dispatch_progress(context, source_node_id, state)
-
-        def _lora_loader():
-            for lora in self.unet.loras:
-                lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}),
-                    context=context,
-                )
-                yield (lora_info.context.model, lora.weight)
-                del lora_info
-            return
-
-        unet_info = context.services.model_manager.get_model(
-            **self.unet.unet.dict(),
-            context=context,
-        )
-        with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(
-            unet_info.context.model, _lora_loader()
-        ), unet_info as unet:
-            noise = noise.to(device=unet.device, dtype=unet.dtype)
-
-            scheduler = get_scheduler(
-                context=context,
-                scheduler_info=self.unet.scheduler,
-                scheduler_name=self.scheduler,
-            )
-
-            pipeline = self.create_pipeline(unet, scheduler)
-            conditioning_data = self.get_conditioning_data(context, scheduler, unet)
-
-            control_data = self.prep_control_data(
-                model=pipeline,
-                context=context,
-                control_input=self.control,
-                latents_shape=noise.shape,
-                # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
-                do_classifier_free_guidance=True,
-                exit_stack=exit_stack,
-            )
-
-            # TODO: Verify the noise is the right size
-            result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
-                latents=torch.zeros_like(noise, dtype=torch_dtype(unet.device)),
-                noise=noise,
-                num_inference_steps=self.steps,
-                conditioning_data=conditioning_data,
-                control_data=control_data,  # list[ControlNetData]
-                callback=step_callback,
-            )
-
-            # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
-            result_latents = result_latents.to("cpu")
-            torch.cuda.empty_cache()
-
-        name = f"{context.graph_execution_state_id}__{self.id}"
-        context.services.latents.save(name, result_latents)
-        return build_latents_output(latents_name=name, latents=result_latents)
+        with SilenceWarnings():
+            noise = context.services.latents.get(self.noise.latents_name)
+
+            # Get the source node id (we are invoking the prepared node)
+            graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
+            source_node_id = graph_execution_state.prepared_source_mapping[self.id]
+
+            def step_callback(state: PipelineIntermediateState):
+                self.dispatch_progress(context, source_node_id, state)
+
+            def _lora_loader():
+                for lora in self.unet.loras:
+                    lora_info = context.services.model_manager.get_model(
+                        **lora.dict(exclude={"weight"}),
+                        context=context,
+                    )
+                    yield (lora_info.context.model, lora.weight)
+                    del lora_info
+                return
+
+            unet_info = context.services.model_manager.get_model(
+                **self.unet.unet.dict(),
+                context=context,
+            )
+            with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(
+                unet_info.context.model, _lora_loader()
+            ), unet_info as unet:
+                noise = noise.to(device=unet.device, dtype=unet.dtype)
+
+                scheduler = get_scheduler(
+                    context=context,
+                    scheduler_info=self.unet.scheduler,
+                    scheduler_name=self.scheduler,
+                )
+
+                pipeline = self.create_pipeline(unet, scheduler)
+                conditioning_data = self.get_conditioning_data(context, scheduler, unet)
+
+                control_data = self.prep_control_data(
+                    model=pipeline,
+                    context=context,
+                    control_input=self.control,
+                    latents_shape=noise.shape,
+                    # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
+                    do_classifier_free_guidance=True,
+                    exit_stack=exit_stack,
+                )
+
+                # TODO: Verify the noise is the right size
+                result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
+                    latents=torch.zeros_like(noise, dtype=torch_dtype(unet.device)),
+                    noise=noise,
+                    num_inference_steps=self.steps,
+                    conditioning_data=conditioning_data,
+                    control_data=control_data,  # list[ControlNetData]
+                    callback=step_callback,
+                )
+
+                # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
+                result_latents = result_latents.to("cpu")
+                torch.cuda.empty_cache()
+
+            name = f"{context.graph_execution_state_id}__{self.id}"
+            context.services.latents.save(name, result_latents)
+            return build_latents_output(latents_name=name, latents=result_latents)
 
 
 class LatentsToLatentsInvocation(TextToLatentsInvocation):

@@ -402,82 +403,83 @@ class Config(InvocationConfig):
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
-        noise = context.services.latents.get(self.noise.latents_name)
-        latent = context.services.latents.get(self.latents.latents_name)
-
-        # Get the source node id (we are invoking the prepared node)
-        graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
-        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
-
-        def step_callback(state: PipelineIntermediateState):
-            self.dispatch_progress(context, source_node_id, state)
-
-        def _lora_loader():
-            for lora in self.unet.loras:
-                lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}),
-                    context=context,
-                )
-                yield (lora_info.context.model, lora.weight)
-                del lora_info
-            return
-
-        unet_info = context.services.model_manager.get_model(
-            **self.unet.unet.dict(),
-            context=context,
-        )
-        with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(
-            unet_info.context.model, _lora_loader()
-        ), unet_info as unet:
-            noise = noise.to(device=unet.device, dtype=unet.dtype)
-            latent = latent.to(device=unet.device, dtype=unet.dtype)
-
-            scheduler = get_scheduler(
-                context=context,
-                scheduler_info=self.unet.scheduler,
-                scheduler_name=self.scheduler,
-            )
-
-            pipeline = self.create_pipeline(unet, scheduler)
-            conditioning_data = self.get_conditioning_data(context, scheduler, unet)
-
-            control_data = self.prep_control_data(
-                model=pipeline,
-                context=context,
-                control_input=self.control,
-                latents_shape=noise.shape,
-                # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
-                do_classifier_free_guidance=True,
-                exit_stack=exit_stack,
-            )
-
-            # TODO: Verify the noise is the right size
-            initial_latents = (
-                latent if self.strength < 1.0 else torch.zeros_like(latent, device=unet.device, dtype=latent.dtype)
-            )
-
-            timesteps, _ = pipeline.get_img2img_timesteps(
-                self.steps,
-                self.strength,
-                device=unet.device,
-            )
-
-            result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
-                latents=initial_latents,
-                timesteps=timesteps,
-                noise=noise,
-                num_inference_steps=self.steps,
-                conditioning_data=conditioning_data,
-                control_data=control_data,  # list[ControlNetData]
-                callback=step_callback,
-            )
-
-            # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
-            result_latents = result_latents.to("cpu")
-            torch.cuda.empty_cache()
-
-        name = f"{context.graph_execution_state_id}__{self.id}"
-        context.services.latents.save(name, result_latents)
+        with SilenceWarnings():  # this quenches NSFW nag from diffusers
+            noise = context.services.latents.get(self.noise.latents_name)
+            latent = context.services.latents.get(self.latents.latents_name)
+
+            # Get the source node id (we are invoking the prepared node)
+            graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
+            source_node_id = graph_execution_state.prepared_source_mapping[self.id]
+
+            def step_callback(state: PipelineIntermediateState):
+                self.dispatch_progress(context, source_node_id, state)
+
+            def _lora_loader():
+                for lora in self.unet.loras:
+                    lora_info = context.services.model_manager.get_model(
+                        **lora.dict(exclude={"weight"}),
+                        context=context,
+                    )
+                    yield (lora_info.context.model, lora.weight)
+                    del lora_info
+                return
+
+            unet_info = context.services.model_manager.get_model(
+                **self.unet.unet.dict(),
+                context=context,
+            )
+            with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(
+                unet_info.context.model, _lora_loader()
+            ), unet_info as unet:
+                noise = noise.to(device=unet.device, dtype=unet.dtype)
+                latent = latent.to(device=unet.device, dtype=unet.dtype)
+
+                scheduler = get_scheduler(
+                    context=context,
+                    scheduler_info=self.unet.scheduler,
+                    scheduler_name=self.scheduler,
+                )
+
+                pipeline = self.create_pipeline(unet, scheduler)
+                conditioning_data = self.get_conditioning_data(context, scheduler, unet)
+
+                control_data = self.prep_control_data(
+                    model=pipeline,
+                    context=context,
+                    control_input=self.control,
+                    latents_shape=noise.shape,
+                    # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
+                    do_classifier_free_guidance=True,
+                    exit_stack=exit_stack,
+                )
+
+                # TODO: Verify the noise is the right size
+                initial_latents = (
+                    latent if self.strength < 1.0 else torch.zeros_like(latent, device=unet.device, dtype=latent.dtype)
+                )
+
+                timesteps, _ = pipeline.get_img2img_timesteps(
+                    self.steps,
+                    self.strength,
+                    device=unet.device,
+                )
+
+                result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
+                    latents=initial_latents,
+                    timesteps=timesteps,
+                    noise=noise,
+                    num_inference_steps=self.steps,
+                    conditioning_data=conditioning_data,
+                    control_data=control_data,  # list[ControlNetData]
+                    callback=step_callback,
+                )
+
+                # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
+                result_latents = result_latents.to("cpu")
+                torch.cuda.empty_cache()
+
+            name = f"{context.graph_execution_state_id}__{self.id}"
+            context.services.latents.save(name, result_latents)
         return build_latents_output(latents_name=name, latents=result_latents)
 
 

@@ -490,7 +492,7 @@ class LatentsToImageInvocation(BaseInvocation):
     # Inputs
     latents: Optional[LatentsField] = Field(description="The latents to generate an image from")
     vae: VaeField = Field(default=None, description="Vae submodel")
-    tiled: bool = Field(default=False, description="Decode latents by overlapping tiles(less memory consumption)")
+    tiled: bool = Field(default=False, description="Decode latents by overlaping tiles (less memory consumption)")
     fp32: bool = Field(DEFAULT_PRECISION == "float32", description="Decode in full precision")
     metadata: Optional[CoreMetadata] = Field(
         default=None, description="Optional core metadata to be written to the image"
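`SilenceWarnings` is imported from `invokeai.backend.model_management.models` and wrapped around both `invoke()` bodies to quench the NSFW-checker nag emitted by diffusers. The commit does not show its implementation; below is a minimal sketch of what such a context manager typically looks like, assuming it is built on the standard `warnings` machinery (the real class may also adjust library logger verbosity):

```python
import warnings
from contextlib import contextmanager

@contextmanager
def silence_warnings_sketch():
    """Hypothetical stand-in for SilenceWarnings, shown for illustration only."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # drop every warning raised inside the block
        yield

# usage mirroring the diff: the whole denoising body runs with warnings suppressed
with silence_warnings_sketch():
    warnings.warn("noisy library warning")  # silently ignored
```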

invokeai/backend/model_management/model_manager.py
Lines changed: 5 additions & 1 deletion

@@ -401,7 +401,11 @@ def create_key(
         base_model: BaseModelType,
         model_type: ModelType,
     ) -> str:
-        return f"{base_model}/{model_type}/{model_name}"
+        # In 3.11, the behavior of (str,enum) when interpolated into a
+        # string has changed. The next two lines are defensive.
+        base_model = BaseModelType(base_model)
+        model_type = ModelType(model_type)
+        return f"{base_model.value}/{model_type.value}/{model_name}"
 
     @classmethod
     def parse_key(cls, model_key: str) -> Tuple[str, BaseModelType, ModelType]:
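The defensive lines address a documented behavior change: for an enum that mixes in `str`, f-string interpolation returned the member's value on Python 3.10 but returns the qualified member name on 3.11, which would silently change the keys this method builds. A small illustration with a hypothetical stand-in enum (not the real BaseModelType/ModelType definitions):

```python
from enum import Enum

class Base(str, Enum):  # hypothetical stand-in for BaseModelType / ModelType
    SD1 = "sd-1"

print(f"{Base.SD1}")        # Python 3.10: "sd-1"   Python 3.11+: "Base.SD1"
print(f"{Base.SD1.value}")  # always "sd-1", which is why create_key uses .value
```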
