#!/usr/bin/env python
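"""Gradio app combining ControlNet v1.1 tabs with a GPT-4 tab that turns a
text prompt into SVG code and rasterizes it to an image."""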
from __future__ import annotations
import gradio as gr
import torch
import re
import openai
from cairosvg import svg2png
from app_canny import create_demo as create_demo_canny
from app_depth import create_demo as create_demo_depth
from app_ip2p import create_demo as create_demo_ip2p
from app_lineart import create_demo as create_demo_lineart
from app_mlsd import create_demo as create_demo_mlsd
from app_normal import create_demo as create_demo_normal
from app_openpose import create_demo as create_demo_openpose
from app_scribble import create_demo as create_demo_scribble
from app_scribble_interactive import create_demo as create_demo_scribble_interactive
from app_segmentation import create_demo as create_demo_segmentation
from app_shuffle import create_demo as create_demo_shuffle
from app_softedge import create_demo as create_demo_softedge
from model import Model
from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON
DESCRIPTION = "# ControlNet v1.1"
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

# A single shared pipeline; each tab switches the ControlNet task at call time.
model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="Canny")
def gpt_control(apikey, prompt):
    """Ask GPT-4 for SVG code matching the prompt, then rasterize it to a PNG."""
    openai.api_key = apikey
    messages = [
        {
            "role": "system",
            "content": "You are an SVG expert with years of experience and multiple contributions to the SVG project. Based on the prompt and the description, please generate the corresponding SVG code.",
        },
        {
            "role": "user",
            "content": f"""Provide only the SVG code without any explanations.
The current objective is below. Reply with the SVG code only:
OBJECTIVE: {prompt}
YOUR SVG CODE:
""",
        },
    ]
    # Legacy (openai<1.0) chat completion call.
    completion = openai.ChatCompletion.create(model="gpt-4", messages=messages)
    chat_response = completion.choices[0].message.content
    # Keep only the lines that look like XML/SVG tags, dropping any surrounding prose.
    code = re.findall("<.*>", chat_response)
    code_new = "\n".join(code)
    svg_code = f"""
{code_new}
"""
    # Render the SVG to a file path that the gr.Image output can display.
    svg2png(bytestring=svg_code, write_to="output.png")
    return "output.png"
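
# Build the Gradio UI: the GPT-4 SVG tab first, then one tab per ControlNet
# preprocessor, and finally a base-model switcher in an accordion.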
with gr.Blocks(css="style.css") as demo:
    gr.HTML("<center><h1>ControlNet with GPT-4</h1></center>")
    gr.Markdown("## <center>Born to Create: Controllable Text-to-Image Generation with GPT-4</center>")
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=SHOW_DUPLICATE_BUTTON,
    )

    with gr.Tab("GPT-4 Control"):
        with gr.Row():
            with gr.Column():
                inp1 = gr.Textbox(label="OpenAI API Key", type="password")
                inp2 = gr.Textbox(label="Position Prompt (as simple as possible)")
                btn1 = gr.Button("GPT-4 Control", variant="primary")
            with gr.Column():
                out1 = gr.Image(label="Output Image", type="pil")
        btn1.click(gpt_control, [inp1, inp2], [out1])
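    # Each create_demo_* factory builds its own tab UI and wires it to the
    # corresponding preprocessing + generation method on the shared model.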
    with gr.Tabs():
        with gr.TabItem("Canny"):
            create_demo_canny(model.process_canny)
        with gr.TabItem("MLSD"):
            create_demo_mlsd(model.process_mlsd)
        with gr.TabItem("Scribble"):
            create_demo_scribble(model.process_scribble)
        with gr.TabItem("Scribble Interactive"):
            create_demo_scribble_interactive(model.process_scribble_interactive)
        with gr.TabItem("SoftEdge"):
            create_demo_softedge(model.process_softedge)
        with gr.TabItem("OpenPose"):
            create_demo_openpose(model.process_openpose)
        with gr.TabItem("Segmentation"):
            create_demo_segmentation(model.process_segmentation)
        with gr.TabItem("Depth"):
            create_demo_depth(model.process_depth)
        with gr.TabItem("Normal map"):
            create_demo_normal(model.process_normal)
        with gr.TabItem("Lineart"):
            create_demo_lineart(model.process_lineart)
        with gr.TabItem("Content Shuffle"):
            create_demo_shuffle(model.process_shuffle)
        with gr.TabItem("Instruct Pix2Pix"):
            create_demo_ip2p(model.process_ip2p)
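    # Optional controls for inspecting or swapping the Stable Diffusion base
    # model that backs every ControlNet tab.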
    with gr.Accordion(label="Base model", open=False):
        with gr.Row():
            with gr.Column(scale=5):
                current_base_model = gr.Text(label="Current base model")
            with gr.Column(scale=1):
                check_base_model_button = gr.Button("Check current base model")
        with gr.Row():
            with gr.Column(scale=5):
                new_base_model_id = gr.Text(
                    label="New base model",
                    max_lines=1,
                    placeholder="runwayml/stable-diffusion-v1-5",
                    info="The base model must be compatible with Stable Diffusion v1.5.",
                    interactive=ALLOW_CHANGING_BASE_MODEL,
                )
            with gr.Column(scale=1):
                change_base_model_button = gr.Button("Change base model", interactive=ALLOW_CHANGING_BASE_MODEL)
        if not ALLOW_CHANGING_BASE_MODEL:
            gr.Markdown("The base model cannot be changed in this Space to keep the demo fast, but you can change it if you duplicate the Space.")

    check_base_model_button.click(
        fn=lambda: model.base_model_id,
        outputs=current_base_model,
        queue=False,
        api_name="check_base_model",
    )
    # Changing the model is triggered either by pressing Enter in the textbox
    # or by clicking the button; gr.on lets one handler serve both triggers.
    gr.on(
        triggers=[new_base_model_id.submit, change_base_model_button.click],
        fn=model.set_base_model,
        inputs=new_base_model_id,
        outputs=current_base_model,
        api_name=False,
    )
if __name__ == "__main__":
    demo.queue(max_size=20).launch()