import gradio as gr
import numpy as np
import ast
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
# Layout canvas size in pixels (width, height); matches the 512x512 size stated in the prompt.
box_scale = (512, 512)
size = box_scale
# Marker that separates the object list from the background prompt in ChatGPT's response.
bg_prompt_text = "Background prompt: "
simplified_prompt = """You are an intelligent bounding box generator. I will provide you with a caption for a photo, image, or painting. Your task is to generate the bounding boxes for the objects mentioned in the caption, along with a background prompt describing the scene. The images are of size 512x512, and the bounding boxes should not overlap or go beyond the image boundaries. Each bounding box should be in the format of (object name, [top-left x coordinate, top-left y coordinate, box width, box height]) and include exactly one object. Do not put objects that are already provided in the bounding boxes into the background prompt. If needed, you can make reasonable guesses. Please refer to the example below for the desired format.
Caption: A realistic image of landscape scene depicting a green car parking on the left of a blue truck, with a red air balloon and a bird in the sky
Objects: [('a green car', [21, 181, 211, 159]), ('a blue truck', [269, 181, 209, 160]), ('a red air balloon', [66, 8, 145, 135]), ('a bird', [296, 42, 143, 100])]
Background prompt: A realistic image of a landscape scene
Caption: A watercolor painting of a wooden table in the living room with an apple on it
Objects: [('a wooden table', [65, 243, 344, 206]), ('an apple', [206, 306, 81, 69])]
Background prompt: A watercolor painting of a living room
Caption: A watercolor painting of two pandas eating bamboo in a forest
Objects: [('a panda eating bamboo', [30, 171, 212, 226]), ('a panda eating bamboo', [264, 173, 222, 221])]
Background prompt: A watercolor painting of a forest
Caption: A realistic image of four skiers standing in a line on the snow near a palm tree
Objects: [('a skier', [5, 152, 139, 168]), ('a skier', [278, 192, 121, 158]), ('a skier', [148, 173, 124, 155]), ('a palm tree', [404, 180, 103, 180])]
Background prompt: A realistic image of an outdoor scene with snow
Caption: An oil painting of a pink dolphin jumping on the left of a steam boat on the sea
Objects: [('a steam boat', [232, 225, 257, 149]), ('a jumping pink dolphin', [21, 249, 189, 123])]
Background prompt: An oil painting of the sea
Caption: A realistic image of a cat playing with a dog in a park with flowers
Objects: [('a playful cat', [51, 67, 271, 324]), ('a playful dog', [302, 119, 211, 228])]
Background prompt: A realistic image of a park with flowers
Caption: ไธ€ไธชๅฎขๅŽ…ๅœบๆ™ฏ็š„ๆฒน็”ป๏ผŒๅข™ไธŠๆŒ‚็€็”ต่ง†๏ผŒ็”ต่ง†ไธ‹้ขๆ˜ฏไธ€ไธชๆŸœๅญ๏ผŒๆŸœๅญไธŠๆœ‰ไธ€ไธช่Šฑ็“ถใ€‚
Objects: [('a tv', [88, 85, 335, 203]), ('a cabinet', [57, 308, 404, 201]), ('a flower vase', [166, 222, 92, 108])]
Background prompt: An oil painting of a living room scene
Caption: {prompt}
Objects: """
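# Note: the template deliberately ends with "Objects: " (with a trailing space); tip 1 in
# the UI below refers to adding/removing this trailing space when ChatGPT doesn't generate a layout.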
def get_lmd_prompt(prompt):
if prompt == "":
prompt = "A realistic photo of a gray cat and an orange dog on the grass."
return simplified_prompt.format(prompt=prompt)
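# Example usage (illustrative): the returned text is meant to be pasted into ChatGPT.
#   full_prompt = get_lmd_prompt("A realistic photo of a gray cat and an orange dog on the grass.")
#   # `full_prompt` is the template above with the caption filled in, ending in "Objects: ".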
def get_layout_image(response):
    gen_boxes, bg_prompt = parse_input(response)
    fig = plt.figure(figsize=(8, 8))
    # https://stackoverflow.com/questions/7821518/save-plot-to-numpy-array
    show_boxes(gen_boxes, bg_prompt)
    # Draw the figure so the renderer has an up-to-date buffer to read from.
    fig.canvas.draw()
    # Copy the rendered canvas into a numpy array. Note: newer matplotlib releases
    # deprecate `tostring_rgb`; `buffer_rgba()` is the replacement if this call is unavailable.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close(fig)  # free the figure instead of only clearing it, so repeated calls don't accumulate
    return data
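# With matplotlib's default dpi (100) and figsize=(8, 8), the returned array has shape
# (800, 800, 3); gr.Image can display it directly.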
def parse_input(text=None):
    """Parse a ChatGPT response into (gen_boxes, bg_prompt)."""
    try:
        if "Objects: " in text:
            text = text.split("Objects: ")[1]
        text_split = text.split(bg_prompt_text)
        if len(text_split) != 2:
            raise ValueError("expected an object list followed by exactly one background prompt")
        gen_boxes, bg_prompt = text_split
        gen_boxes = ast.literal_eval(gen_boxes)
        bg_prompt = bg_prompt.strip()
    except Exception as e:
        raise gr.Error(f"response format invalid: {e} (text: {text})")
    return gen_boxes, bg_prompt
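# Example of the response format parse_input expects (box values taken from the
# in-context examples above; the background prompt here is hypothetical):
#   parse_input("Objects: [('a bird', [296, 42, 143, 100])]\n"
#               "Background prompt: A realistic image of a sky")
#   -> ([('a bird', [296, 42, 143, 100])], 'A realistic image of a sky')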
def draw_boxes(anns):
    ax = plt.gca()
    ax.set_autoscale_on(False)
    polygons = []
    color = []
    for ann in anns:
        # Random light color so each box stays visible on the white canvas.
        c = (np.random.random((1, 3)) * 0.6 + 0.4)
        [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']
        poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h],
                [bbox_x + bbox_w, bbox_y + bbox_h], [bbox_x + bbox_w, bbox_y]]
        np_poly = np.array(poly).reshape((4, 2))
        polygons.append(Polygon(np_poly))
        color.append(c)
        # Label each box with its object name at the top-left corner.
        name = ann['name'] if 'name' in ann else str(ann['category_id'])
        ax.text(bbox_x, bbox_y, name, style='italic',
                bbox={'facecolor': 'white', 'alpha': 0.7, 'pad': 5})
    p = PatchCollection(polygons, facecolor='none',
                        edgecolors=color, linewidths=2)
    ax.add_collection(p)
def show_boxes(gen_boxes, bg_prompt=None):
    anns = [{'name': gen_box[0], 'bbox': gen_box[1]}
            for gen_box in gen_boxes]
    # White background, slightly larger than the canvas so border lines show on the edge.
    I = np.ones((size[0] + 4, size[1] + 4, 3), dtype=np.uint8) * 255
    plt.imshow(I)
    plt.axis('off')
    if bg_prompt is not None:
        ax = plt.gca()
        # Show the background prompt in the top-left corner.
        ax.text(0, 0, bg_prompt, style='italic',
                bbox={'facecolor': 'white', 'alpha': 0.7, 'pad': 5})
        # Draw a black border around the full canvas.
        c = np.zeros((1, 3))
        [bbox_x, bbox_y, bbox_w, bbox_h] = (0, 0, size[1], size[0])
        poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h],
                [bbox_x + bbox_w, bbox_y + bbox_h], [bbox_x + bbox_w, bbox_y]]
        np_poly = np.array(poly).reshape((4, 2))
        polygons = [Polygon(np_poly)]
        color = [c]
        p = PatchCollection(polygons, facecolor='none',
                            edgecolors=color, linewidths=2)
        ax.add_collection(p)
    draw_boxes(anns)
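# Example (hypothetical boxes, using the same [top-left x, top-left y, width, height]
# convention as the prompt):
#   plt.figure(figsize=(8, 8))
#   show_boxes([('a gray cat', [60, 250, 180, 180]), ('an orange dog', [280, 240, 180, 190])],
#              bg_prompt="A realistic photo of the grass")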
# Styling for the layout image is set via Blocks-level CSS (scoped to the Image's elem_id),
# since individual components do not take a `css` argument.
with gr.Blocks(css="#img img {width: 300px}") as g:
gr.HTML("""<h1>LLM-grounded Diffusion: Enhancing Prompt Understanding of Text-to-Image Diffusion Models with Large Language Models</h1>
<p>This space lets you explore the layouts generated by ChatGPT on your own, using a simplified set of in-context examples. The layout-to-image generation part will be added later.</p>
<p>Read a <a href='https://llm-grounded-diffusion.github.io/'>brief introduction on our project page</a> or <a href='https://arxiv.org/pdf/2305.13655.pdf'>our paper on arXiv</a>. <a href='https://llm-grounded-diffusion.github.io/#citation'>Cite our work</a> if our ideas inspire you.</p>
<p><b>Tips:</b></p>
<p>1. If ChatGPT doesn't generate a layout, add/remove the trailing space (added by default) and/or use GPT-4.</p>
<p>2. You can refine the layout over multiple rounds by giving ChatGPT follow-up requests (e.g., make the object boxes bigger).</p>
<p>3. You can also try prompts in Simplified Chinese. If you want to try prompts in another language, translate the first line of the last example to your language.</p>""")
with gr.Tab("Image Prompt to ChatGPT"):
with gr.Row():
with gr.Column(scale=1):
prompt = gr.Textbox(lines=2, label="Prompt for Layout Generation", placeholder="A realistic photo of a gray cat and an orange dog on the grass.")
greet_btn = gr.Button("Generate Prompt")
with gr.Column(scale=1):
output = gr.Textbox(label="Paste this into ChatGPT (GPT-4 usually gives better results)")
greet_btn.click(fn=get_lmd_prompt, inputs=prompt, outputs=output, api_name="get_lmd_prompt")
with gr.Tab("Visualize ChatGPT-generated Layout"):
with gr.Row():
with gr.Column(scale=1):
prompt = gr.Textbox(lines=2, label="Paste ChatGPT response here", placeholder="Paste ChatGPT response here")
greet_btn = gr.Button("Visualize Layout")
with gr.Column(scale=1):
                output = gr.Image(shape=(512, 512), elem_classes="img", elem_id="img")
greet_btn.click(fn=get_layout_image, inputs=prompt, outputs=output, api_name="chatgpt-to-layout")
g.launch()