Michael Yang committed
Commit • 7134722 • 1 Parent(s): 0305ee7

b64 support

Files changed:
- app.py +34 -13
- generation.py +7 -1
app.py CHANGED

@@ -10,6 +10,11 @@ from baseline import run as run_baseline
 import torch
 from shared import DEFAULT_SO_NEGATIVE_PROMPT, DEFAULT_OVERALL_NEGATIVE_PROMPT
 from examples import stage1_examples, stage2_examples
+import pickle
+import codecs
+import subprocess
+import base64
+import io
 
 print(f"Is CUDA available: {torch.cuda.is_available()}")
 if torch.cuda.is_available():
@@ -61,6 +66,9 @@ layout_placeholder = """Caption: A realistic photo of a gray cat and an orange d
 Objects: [('a gray cat', [67, 243, 120, 126]), ('an orange dog', [265, 193, 190, 210])]
 Background prompt: A realistic photo of a grassy area."""
 
+canvasbase64 = ""
+oursimagebase64 = ""
+
 def get_lmd_prompt(prompt, template=default_template):
     if prompt == "":
         prompt = prompt_placeholder
@@ -69,6 +77,7 @@ def get_lmd_prompt(prompt, template=default_template):
     return simplified_prompt.format(template=template, prompt=prompt)
 
 def get_layout_image(response):
+    global canvasbase64
     if response == "":
         response = layout_placeholder
     gen_boxes, bg_prompt = parse_input(response)
@@ -82,13 +91,19 @@ def get_layout_image(response):
     # Now we can save it to a numpy array.
     data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
     data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+    pic_IObytes = io.BytesIO()
+    plt.savefig(pic_IObytes, format='png')
+    pic_IObytes.seek(0)
+    canvasbase64 = base64.b64encode(pic_IObytes.read()).decode()
+
     plt.clf()
-    return data
+    return [data,canvasbase64]
 
 def get_layout_image_gallery(response):
-    return [get_layout_image(response)]
+    return get_layout_image(response)
 
 def get_ours_image(response, overall_prompt_override="", seed=0, num_inference_steps=20, dpm_scheduler=True, use_autocast=False, fg_seed_start=20, fg_blending_ratio=0.1, frozen_step_ratio=0.4, gligen_scheduled_sampling_beta=0.3, so_negative_prompt=DEFAULT_SO_NEGATIVE_PROMPT, overall_negative_prompt=DEFAULT_OVERALL_NEGATIVE_PROMPT, show_so_imgs=False, scale_boxes=False):
+    global oursimagebase64
     if response == "":
         response = layout_placeholder
     gen_boxes, bg_prompt = parse_input(response)
@@ -105,15 +120,20 @@ def get_ours_image(response, overall_prompt_override="", seed=0, num_inference_s
     else:
         scheduler_key = "scheduler"
 
-    image_np, so_img_list = run_ours(
+    image_np, so_img_list, b64 = run_ours(
         spec, bg_seed=seed, overall_prompt_override=overall_prompt_override, fg_seed_start=fg_seed_start,
         fg_blending_ratio=fg_blending_ratio,frozen_step_ratio=frozen_step_ratio, use_autocast=use_autocast,
         gligen_scheduled_sampling_beta=gligen_scheduled_sampling_beta, num_inference_steps=num_inference_steps, scheduler_key=scheduler_key,
         so_negative_prompt=so_negative_prompt, overall_negative_prompt=overall_negative_prompt, so_batch_size=2
     )
-    images = [image_np]
-    if show_so_imgs:
-        images.extend([np.asarray(so_img) for so_img in so_img_list])
+    print(type(image_np))
+    pic_IObytes = io.BytesIO()
+    plt.savefig(pic_IObytes, format='png')
+    pic_IObytes.seek(0)
+    canvasbase64 = base64.b64encode(pic_IObytes.read()).decode()
+    images = [image_np, b64]
+    # if show_so_imgs:
+    #     images.extend([np.asarray(so_img) for so_img in so_img_list])
     return images
 
 def get_baseline_image(prompt, seed=0):
@@ -230,7 +250,7 @@ with gr.Blocks(
             inputs=[prompt],
             outputs=[output],
             fn=get_lmd_prompt,
-            cache_examples=True
+            # cache_examples=True
         )
 
     with gr.Tab("Stage 2 (New). Layout to Image generation"):
@@ -254,18 +274,19 @@ with gr.Blocks(
                 visualize_btn = gr.Button("Visualize Layout", elem_classes="btn")
                 generate_btn = gr.Button("Generate Image from Layout", variant='primary', elem_classes="btn")
             with gr.Column(scale=1):
-                gallery = gr.Gallery(
-                    label="Generated image", show_label=False, elem_id="gallery", columns=[1], rows=[1], object_fit="contain"
+                gallery = gr.Image(
+                    label="Generated image", show_label=False, elem_id="gallery", columns=[1], rows=[1], object_fit="contain"
                 )
-        visualize_btn.click(fn=get_layout_image_gallery, inputs=response, outputs=gallery, api_name="visualize-layout")
-        generate_btn.click(fn=get_ours_image, inputs=[response, overall_prompt_override, seed, num_inference_steps, dpm_scheduler, use_autocast, fg_seed_start, fg_blending_ratio, frozen_step_ratio, gligen_scheduled_sampling_beta, so_negative_prompt, overall_negative_prompt, show_so_imgs, scale_boxes], outputs=gallery, api_name="layout-to-image")
+                b64 = gr.Textbox(label="base64", placeholder="base64", lines = 2)
+        visualize_btn.click(fn=get_layout_image_gallery, inputs=response, outputs=[gallery, b64], api_name="visualize-layout")
+        generate_btn.click(fn=get_ours_image, inputs=[response, overall_prompt_override, seed, num_inference_steps, dpm_scheduler, use_autocast, fg_seed_start, fg_blending_ratio, frozen_step_ratio, gligen_scheduled_sampling_beta, so_negative_prompt, overall_negative_prompt, show_so_imgs, scale_boxes], outputs=[gallery, b64], api_name="layout-to-image")
 
         gr.Examples(
             examples=stage2_examples,
             inputs=[response, overall_prompt_override, seed],
             outputs=[gallery],
             fn=get_ours_image,
-            cache_examples=True
+            # cache_examples=True
         )
 
     with gr.Tab("Baseline: Stable Diffusion"):
@@ -287,7 +308,7 @@ with gr.Blocks(
             inputs=[sd_prompt],
            outputs=[gallery],
            fn=get_baseline_image,
-            cache_examples=True
+            # cache_examples=True
        )
 
 g.launch()
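Because both click handlers now set api_name, the Space exposes them as named API endpoints. A minimal client-side sketch with gradio_client, under assumptions not in the commit (the Space URL is a placeholder, and the Image output typically comes back as a local temp-file path):

    from gradio_client import Client

    # Layout text taken from the app's own placeholder example.
    layout_text = (
        "Caption: A realistic photo of a gray cat and an orange dog\n"
        "Objects: [('a gray cat', [67, 243, 120, 126]), ('an orange dog', [265, 193, 190, 210])]\n"
        "Background prompt: A realistic photo of a grassy area."
    )

    client = Client("https://example-lmd-space.hf.space")  # hypothetical URL
    # Calls the handler registered with api_name="visualize-layout"; one value
    # is returned per output component: the layout image and the base64 string.
    layout_image_path, canvas_b64 = client.predict(layout_text, api_name="/visualize-layout")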
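The canvasbase64 value filled in get_layout_image is a base64-encoded PNG (plt.savefig into a BytesIO, then b64encode), so a client reverses it with b64decode. A minimal sketch, not part of the commit, assuming Pillow on the client:

    import base64
    import io

    from PIL import Image  # assumed client-side dependency

    def decode_b64_png(canvas_b64: str) -> Image.Image:
        # Reverse of base64.b64encode(pic_IObytes.read()).decode() in app.py:
        # base64 string -> raw PNG bytes -> PIL image.
        return Image.open(io.BytesIO(base64.b64decode(canvas_b64)))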
generation.py CHANGED

@@ -8,6 +8,8 @@ from models import pipelines, sam
 from utils import parse, latents
 from shared import model_dict, sam_model_dict, DEFAULT_SO_NEGATIVE_PROMPT, DEFAULT_OVERALL_NEGATIVE_PROMPT
 import gc
+from io import BytesIO
+import base64
 
 verbose = False
 
@@ -209,6 +211,10 @@ def run(
 
     gc.collect()
     torch.cuda.empty_cache()
+
+    with BytesIO() as buffer:
+        np.save(buffer, images[0])
+        img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')
 
-    return images[0], so_img_list
+    return images[0], so_img_list, img_str
 
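Unlike the PNG string above, the img_str returned here wraps a .npy payload: run saves images[0] with np.save into an in-memory buffer and base64-encodes the result, so the matching decode is b64decode followed by np.load. A minimal sketch of that reverse step (assumed client code, not part of the commit):

    import base64
    from io import BytesIO

    import numpy as np

    def decode_image_array(img_str: str) -> np.ndarray:
        # Reverse of the encode in run(): base64 -> .npy bytes -> ndarray.
        return np.load(BytesIO(base64.b64decode(img_str)))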