time-diffusion / app.py
import gradio as gr
from time_main import edit_model, generate_for_text
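
# Note (inferred from the UI wiring below, not documented in this file):
# edit_model(source_prompt, destination_prompt, lam) is expected to apply the TIME
# edit to the loaded Stable Diffusion v1.4 model and return an HTML status string,
# and generate_for_text(prompt) is expected to return a generated image in any
# format accepted by gr.Image (e.g. a PIL image or a numpy array).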
with gr.Blocks() as demo:
    gr.Markdown("<center><h2>TIME: Text-to-Image Model Editing</h2>Demo for the paper <a href=\"https://time-diffusion.github.io/\" style=\"color:black;\">\"Editing Implicit Assumptions in Text-to-Image Diffusion Models\"</a>. Implemented with Stable Diffusion v1.4.</center>")

    with gr.Box():
        gr.Markdown("1. Edit a concept in a text-to-image model by specifying an under-specified \"source\" prompt, and a similar \"destination\" prompt with an additional specification.")
        with gr.Row():
            src = gr.Textbox(label="Source Prompt", placeholder="e.g., A pack of roses")
            dst = gr.Textbox(label="Destination Prompt", placeholder="e.g., A pack of blue roses")
        with gr.Row():
            lamb_val = gr.Slider(value=0.1, minimum=0.01, maximum=10000, label="Strength of regularization (lambda)", interactive=True)
        with gr.Row():
            edit_btn = gr.Button("Edit Model")
        with gr.Row():
            gr.HTML(value="<br />")
        with gr.Row():
            edit_status = gr.HTML(value="<b>Current model status:</b> Unedited")
        edit_btn.click(fn=edit_model, inputs=[src, dst, lamb_val], outputs=edit_status)

    with gr.Box():
        gr.Markdown("2. After editing, try any test prompt and see the effect on the generated images!")
        with gr.Row():
            tst = gr.Textbox(label="Test Prompt", placeholder="e.g., A field of roses")
        with gr.Row():
            gen_btn = gr.Button("Generate Image")
        with gr.Row():
            gr.HTML(value="<br />")
        with gr.Row():
            out_img = gr.Image(label="Generated Image")
        gen_btn.click(fn=generate_for_text, inputs=tst, outputs=out_img)

demo.launch()
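
# -----------------------------------------------------------------------------
# Hedged sketch (not part of the original repo): a minimal stand-in for
# time_main.py, assuming only the two interfaces used above. It lets the UI be
# exercised without the actual TIME editing code, which modifies the
# text-to-image model itself rather than returning placeholder outputs.
#
#     from PIL import Image
#
#     def edit_model(source_prompt, destination_prompt, lam):
#         # Placeholder: the real function edits Stable Diffusion v1.4 given the
#         # source/destination prompts and the regularization strength lam.
#         return (f"<b>Current model status:</b> Edited "
#                 f"(\"{source_prompt}\" -> \"{destination_prompt}\", lambda = {lam})")
#
#     def generate_for_text(test_prompt):
#         # Placeholder: the real function samples an image from the (edited) model.
#         return Image.new("RGB", (512, 512), color="white")
# -----------------------------------------------------------------------------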