Commit 42548df

convert nodes_lumina2.py to V3 schema (comfyanonymous#10058)
1 parent fd7e78c

File tree

1 file changed (+62 −39 lines)

comfy_extras/nodes_lumina2.py

Lines changed: 62 additions & 39 deletions
@@ -1,20 +1,27 @@
-from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
+from typing_extensions import override
 import torch
 
+from comfy_api.latest import ComfyExtension, io
 
-class RenormCFG:
-    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "model": ("MODEL",),
-                              "cfg_trunc": ("FLOAT", {"default": 100, "min": 0.0, "max": 100.0, "step": 0.01}),
-                              "renorm_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
-                              }}
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
 
-    CATEGORY = "advanced/model"
+class RenormCFG(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="RenormCFG",
+            category="advanced/model",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01),
+                io.Float.Input("renorm_cfg", default=1.0, min=0.0, max=100.0, step=0.01),
+            ],
+            outputs=[
+                io.Model.Output(),
+            ],
+        )
 
-    def patch(self, model, cfg_trunc, renorm_cfg):
+    @classmethod
+    def execute(cls, model, cfg_trunc, renorm_cfg) -> io.NodeOutput:
         def renorm_cfg_func(args):
             cond_denoised = args["cond_denoised"]
             uncond_denoised = args["uncond_denoised"]
@@ -53,10 +60,10 @@ def renorm_cfg_func(args):
 
         m = model.clone()
         m.set_model_sampler_cfg_function(renorm_cfg_func)
-        return (m, )
+        return io.NodeOutput(m)
 
 
-class CLIPTextEncodeLumina2(ComfyNodeABC):
+class CLIPTextEncodeLumina2(io.ComfyNode):
     SYSTEM_PROMPT = {
         "superior": "You are an assistant designed to generate superior images with the superior "\
             "degree of image-text alignment based on textual prompts or user prompts.",
@@ -69,36 +76,52 @@ class CLIPTextEncodeLumina2(ComfyNodeABC):
         "Alignment: You are an assistant designed to generate high-quality images with the highest "\
         "degree of image-text alignment based on textual prompts."
     @classmethod
-    def INPUT_TYPES(s) -> InputTypeDict:
-        return {
-            "required": {
-                "system_prompt": (list(CLIPTextEncodeLumina2.SYSTEM_PROMPT.keys()), {"tooltip": CLIPTextEncodeLumina2.SYSTEM_PROMPT_TIP}),
-                "user_prompt": (IO.STRING, {"multiline": True, "dynamicPrompts": True, "tooltip": "The text to be encoded."}),
-                "clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."})
-            }
-        }
-    RETURN_TYPES = (IO.CONDITIONING,)
-    OUTPUT_TOOLTIPS = ("A conditioning containing the embedded text used to guide the diffusion model.",)
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning"
-    DESCRIPTION = "Encodes a system prompt and a user prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
-
-    def encode(self, clip, user_prompt, system_prompt):
+    def define_schema(cls):
+        return io.Schema(
+            node_id="CLIPTextEncodeLumina2",
+            display_name="CLIP Text Encode for Lumina2",
+            category="conditioning",
+            description="Encodes a system prompt and a user prompt using a CLIP model into an embedding "
+                        "that can be used to guide the diffusion model towards generating specific images.",
+            inputs=[
+                io.Combo.Input(
+                    "system_prompt",
+                    options=list(cls.SYSTEM_PROMPT.keys()),
+                    tooltip=cls.SYSTEM_PROMPT_TIP,
+                ),
+                io.String.Input(
+                    "user_prompt",
+                    multiline=True,
+                    dynamic_prompts=True,
+                    tooltip="The text to be encoded.",
+                ),
+                io.Clip.Input("clip", tooltip="The CLIP model used for encoding the text."),
+            ],
+            outputs=[
+                io.Conditioning.Output(
+                    tooltip="A conditioning containing the embedded text used to guide the diffusion model.",
+                ),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, clip, user_prompt, system_prompt) -> io.NodeOutput:
         if clip is None:
             raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.")
-        system_prompt = CLIPTextEncodeLumina2.SYSTEM_PROMPT[system_prompt]
+        system_prompt = cls.SYSTEM_PROMPT[system_prompt]
         prompt = f'{system_prompt} <Prompt Start> {user_prompt}'
         tokens = clip.tokenize(prompt)
-        return (clip.encode_from_tokens_scheduled(tokens), )
+        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))
 
 
-NODE_CLASS_MAPPINGS = {
-    "CLIPTextEncodeLumina2": CLIPTextEncodeLumina2,
-    "RenormCFG": RenormCFG
-}
+class Lumina2Extension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            CLIPTextEncodeLumina2,
+            RenormCFG,
+        ]
 
 
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "CLIPTextEncodeLumina2": "CLIP Text Encode for Lumina2",
-}
+async def comfy_entrypoint() -> Lumina2Extension:
+    return Lumina2Extension()
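
For reference, here is the V1-to-V3 conversion pattern this commit applies, distilled into a minimal sketch. It uses only APIs that appear in the diff above (io.ComfyNode, io.Schema, io.NodeOutput, ComfyExtension, comfy_entrypoint); the ExampleScale node, its "scale" input, and ExampleExtension are hypothetical names for illustration, not part of this commit.

from typing_extensions import override

from comfy_api.latest import ComfyExtension, io


class ExampleScale(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        # Declares identity, category, and typed inputs/outputs in one place,
        # replacing the V1 INPUT_TYPES / RETURN_TYPES / FUNCTION / CATEGORY
        # class attributes.
        return io.Schema(
            node_id="ExampleScale",  # hypothetical node, for illustration
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("scale", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, scale) -> io.NodeOutput:
        # V3 entry point is a classmethod named execute returning io.NodeOutput,
        # instead of an instance method returning a one-element tuple.
        return io.NodeOutput(model.clone())


class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        # Registration goes through the extension class rather than the
        # V1 NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS dicts.
        return [ExampleScale]


async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()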
