
Commit e3e767c

Author: Gaurav Shukla
[WEB] Remove live preview and disable resnet|albert_maskfill
This commit removes the live preview feature for now, as it is not functional. The feature will be added back in the next patch.

Signed-off-by: Gaurav Shukla <[email protected]>
1 parent 239c19e commit e3e767c
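
For context on the commit message: the "live preview" refers to Gradio's generator-style event handlers, where the Stable Diffusion handler yielded an intermediate image after every denoising step through the request queue. The sketch below is a hypothetical illustration, not code from this repo; the toy handlers and component names are made up, and exact streaming behavior depends on the installed Gradio version.

import gradio as gr

def run_with_live_preview(prompt):
    # Generator handler: each yield pushes an intermediate result to the UI.
    partial = ""
    for word in prompt.split():
        partial += word + " "
        yield partial.strip()

def run_once(prompt):
    # Plain handler (the style this commit falls back to): one final update.
    return " ".join(prompt.split())

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Output")
    run = gr.Button("Run")
    # Wire the streaming handler; swap in run_once for a single final update.
    run.click(run_with_live_preview, inputs=[prompt], outputs=[output])

demo.queue()   # generator handlers only stream when the queue is enabled
demo.launch()

Replacing the generator handler with a plain function that returns once, and dropping the queue call, is essentially what the diffs below do for the Stable Diffusion path.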

File tree

4 files changed (+85, -88 lines)


web/Nod_logo.jpg

Binary file removed (40.6 KB); not shown.

web/Nod_logo.png

Binary file added (32.9 KB); not shown.

web/index.py

Lines changed: 75 additions & 74 deletions
@@ -16,82 +16,82 @@ def debug_event(debug):
     with gr.Row():
         with gr.Group():
             with gr.Column(scale=1):
-                img = Image.open("./Nod_logo.jpg")
+                img = Image.open("./Nod_logo.png")
                 gr.Image(value=img, show_label=False, interactive=False).style(
-                    height=70, width=70
+                    height=80, width=150
                 )
-            with gr.Column(scale=9):
+            with gr.Column(scale=1):
                 gr.Label(value="Shark Models Demo.")
 
     with gr.Tabs():
-        with gr.TabItem("ResNet50"):
-            image = device = debug = resnet = output = std_output = None
-            with gr.Row():
-                with gr.Column(scale=1, min_width=600):
-                    image = gr.Image(label="Image")
-                    device = gr.Radio(
-                        label="Device",
-                        value="cpu",
-                        choices=["cpu", "cuda", "vulkan"],
-                    )
-                    debug = gr.Checkbox(label="DEBUG", value=False)
-                    resnet = gr.Button("Recognize Image").style(
-                        full_width=True
-                    )
-                with gr.Column(scale=1, min_width=600):
-                    output = gr.Label(label="Output")
-                    std_output = gr.Textbox(
-                        label="Std Output",
-                        value="Nothing to show.",
-                        visible=False,
-                    )
-                    debug.change(
-                        debug_event,
-                        inputs=[debug],
-                        outputs=[std_output],
-                        show_progress=False,
-                    )
-                    resnet.click(
-                        resnet_inf,
-                        inputs=[image, device],
-                        outputs=[output, std_output],
-                    )
-
-        with gr.TabItem("Albert MaskFill"):
-            masked_text = (
-                device
-            ) = debug = albert_mask = decoded_res = std_output = None
-            with gr.Row():
-                with gr.Column(scale=1, min_width=600):
-                    masked_text = gr.Textbox(
-                        label="Masked Text",
-                        placeholder="Give me a sentence with [MASK] to fill",
-                    )
-                    device = gr.Radio(
-                        label="Device",
-                        value="cpu",
-                        choices=["cpu", "cuda", "vulkan"],
-                    )
-                    debug = gr.Checkbox(label="DEBUG", value=False)
-                    albert_mask = gr.Button("Decode Mask")
-                with gr.Column(scale=1, min_width=600):
-                    decoded_res = gr.Label(label="Decoded Results")
-                    std_output = gr.Textbox(
-                        label="Std Output",
-                        value="Nothing to show.",
-                        visible=False,
-                    )
-                    debug.change(
-                        debug_event,
-                        inputs=[debug],
-                        outputs=[std_output],
-                        show_progress=False,
-                    )
-                    albert_mask.click(
-                        albert_maskfill_inf,
-                        inputs=[masked_text, device],
-                        outputs=[decoded_res, std_output],
-                    )
+        # with gr.TabItem("ResNet50"):
+        #     image = device = debug = resnet = output = std_output = None
+        #     with gr.Row():
+        #         with gr.Column(scale=1, min_width=600):
+        #             image = gr.Image(label="Image")
+        #             device = gr.Radio(
+        #                 label="Device",
+        #                 value="cpu",
+        #                 choices=["cpu", "cuda", "vulkan"],
+        #             )
+        #             debug = gr.Checkbox(label="DEBUG", value=False)
+        #             resnet = gr.Button("Recognize Image").style(
+        #                 full_width=True
+        #             )
+        #         with gr.Column(scale=1, min_width=600):
+        #             output = gr.Label(label="Output")
+        #             std_output = gr.Textbox(
+        #                 label="Std Output",
+        #                 value="Nothing to show.",
+        #                 visible=False,
+        #             )
+        #             debug.change(
+        #                 debug_event,
+        #                 inputs=[debug],
+        #                 outputs=[std_output],
+        #                 show_progress=False,
+        #             )
+        #             resnet.click(
+        #                 resnet_inf,
+        #                 inputs=[image, device],
+        #                 outputs=[output, std_output],
+        #             )
+        #
+        # with gr.TabItem("Albert MaskFill"):
+        #     masked_text = (
+        #         device
+        #     ) = debug = albert_mask = decoded_res = std_output = None
+        #     with gr.Row():
+        #         with gr.Column(scale=1, min_width=600):
+        #             masked_text = gr.Textbox(
+        #                 label="Masked Text",
+        #                 placeholder="Give me a sentence with [MASK] to fill",
+        #             )
+        #             device = gr.Radio(
+        #                 label="Device",
+        #                 value="cpu",
+        #                 choices=["cpu", "cuda", "vulkan"],
+        #             )
+        #             debug = gr.Checkbox(label="DEBUG", value=False)
+        #             albert_mask = gr.Button("Decode Mask")
+        #         with gr.Column(scale=1, min_width=600):
+        #             decoded_res = gr.Label(label="Decoded Results")
+        #             std_output = gr.Textbox(
+        #                 label="Std Output",
+        #                 value="Nothing to show.",
+        #                 visible=False,
+        #             )
+        #             debug.change(
+        #                 debug_event,
+        #                 inputs=[debug],
+        #                 outputs=[std_output],
+        #                 show_progress=False,
+        #             )
+        #             albert_mask.click(
+        #                 albert_maskfill_inf,
+        #                 inputs=[masked_text, device],
+        #                 outputs=[decoded_res, std_output],
+        #             )
 
         # with gr.TabItem("V-Diffusion"):
         #     prompt = sample_count = batch_size = iters = device = v_diffusion = generated_img = None
@@ -192,12 +192,14 @@ def debug_event(debug):
                         value="vulkan",
                         choices=["cpu", "cuda", "vulkan"],
                     )
-                    load_vmfb = gr.Checkbox(label="Load vmfb", value=True)
-                    save_vmfb = gr.Checkbox(label="Save vmfb", value=False)
+                    with gr.Row():
+                        load_vmfb = gr.Checkbox(label="Load vmfb", value=True)
+                        save_vmfb = gr.Checkbox(label="Save vmfb", value=False)
                     iree_vulkan_target_triple = gr.Textbox(
                         value="",
                         max_lines=1,
                         label="IREE VULKAN TARGET TRIPLE",
+                        visible=False,
                     )
                     debug = gr.Checkbox(label="DEBUG", value=False)
                     stable_diffusion = gr.Button("Generate image from prompt")
@@ -236,5 +238,4 @@ def debug_event(debug):
         outputs=[generated_img, std_output],
     )
 
-shark_web.queue()
 shark_web.launch(share=True, server_port=8080, enable_queue=True)
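
The hunks above wire debug.change(debug_event, ...) several times, but debug_event itself sits earlier in index.py and only its signature appears in the hunk headers. The following is a hedged sketch of what such a handler plausibly looks like, assuming it does nothing more than toggle the Std Output textbox's visibility; the body shown here is an assumption, not the repo's actual implementation.

import gradio as gr

def debug_event(debug):
    # Assumed behavior: show the Std Output box only while DEBUG is ticked.
    return gr.update(visible=debug)

with gr.Blocks() as demo:
    debug = gr.Checkbox(label="DEBUG", value=False)
    std_output = gr.Textbox(
        label="Std Output", value="Nothing to show.", visible=False
    )
    debug.change(
        debug_event,
        inputs=[debug],
        outputs=[std_output],
        show_progress=False,
    )

demo.launch()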

web/models/stable_diffusion/main.py

Lines changed: 10 additions & 14 deletions
@@ -72,8 +72,7 @@ def get_models():
         IREE_EXTRA_ARGS += [
             "--iree-flow-enable-conv-nchw-to-nhwc-transform",
             "--iree-flow-enable-padding-linalg-ops",
-            "--iree-flow-linalg-ops-padding-size=16",
-            "--iree-flow-enable-iterator-space-fusion",
+            "--iree-flow-linalg-ops-padding-size=32",
         ]
         if args.import_mlir == True:
             return get_vae16(args, model_name=VAE_FP16), get_unet16_wrapped(
@@ -229,10 +228,8 @@ def stable_diff_inf(
     text_embeddings_numpy = text_embeddings.detach().numpy()
 
     avg_ms = 0
-    pil_images = []
     for i, t in tqdm(enumerate(scheduler.timesteps)):
 
-        time.sleep(0.1)
         if DEBUG:
             log_write.write(f"\ni = {i} t = {t} ")
         step_start = time.time()
@@ -250,16 +247,15 @@ def stable_diff_inf(
         if DEBUG:
             log_write.write(f"time={step_ms}ms")
         latents = scheduler.step(noise_pred, i, latents)["prev_sample"]
-        # scale and decode the image latents with vae
-        latents = 1 / 0.18215 * latents
-        latents_numpy = latents.detach().numpy()
-        image = vae.forward((latents_numpy,))
-        image = torch.from_numpy(image)
-        image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
-        images = (image * 255).round().astype("uint8")
-        pil_images = [Image.fromarray(image) for image in images]
-        yield pil_images[0], ""
 
+    # scale and decode the image latents with vae
+    latents = 1 / 0.18215 * latents
+    latents_numpy = latents.detach().numpy()
+    image = vae.forward((latents_numpy,))
+    image = torch.from_numpy(image)
+    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
+    images = (image * 255).round().astype("uint8")
+    pil_images = [Image.fromarray(image) for image in images]
     avg_ms = 1000 * avg_ms / args.steps
     if DEBUG:
         log_write.write(f"\nAverage step time: {avg_ms}ms/it")
@@ -273,4 +269,4 @@ def stable_diff_inf(
     std_output = ""
     with open(r"logs/stable_diffusion_log.txt", "r") as log_read:
         std_output = log_read.read()
-    yield output, std_output
+    return output, std_output
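
The last two hunks move the VAE decode out of the denoising loop and switch the handler from yielding per-step previews to returning a single final image. The sketch below reproduces that latents-to-PIL conversion in isolation, using a random latent tensor and a stub decoder in place of the repo's compiled VAE; stub_vae_decode and the 512x512 output shape are assumptions for illustration.

import numpy as np
import torch
from PIL import Image

def stub_vae_decode(latents_numpy):
    # Stand-in for vae.forward((latents_numpy,)): returns an NCHW float image
    # batch in [0, 1]. The real project decodes with a compiled VAE module.
    n = latents_numpy.shape[0]
    return np.random.rand(n, 3, 512, 512).astype("float32")

latents = torch.randn(1, 4, 64, 64)     # final latents after the loop
latents = 1 / 0.18215 * latents         # undo Stable Diffusion latent scaling
latents_numpy = latents.detach().numpy()
image = stub_vae_decode(latents_numpy)
image = torch.from_numpy(image)
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()  # NCHW -> NHWC
images = (image * 255).round().astype("uint8")
pil_images = [Image.fromarray(img) for img in images]
print(pil_images[0].size)               # (512, 512)

Decoding once after the loop drops one VAE forward pass per step, which appears to be the cost the removed live preview was paying on every iteration.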

0 commit comments
