Spaces:
Sleeping
Sleeping
File size: 3,669 Bytes
f901652 bb1c525 90deeeb 23f2de3 90deeeb b024062 23f2de3 c3d14af 23f2de3 bb1c525 c3d14af 207d269 bb1c525 aed67d7 c3d14af aed67d7 207d269 9ef80c7 207d269 bfc412c 9ef80c7 207d269 4fe0601 bb1c525 aed67d7 255b6b2 aed67d7 bfc412c eef5127 bfc412c bb1c525 207d269 c3d14af 207d269 255b6b2 59df77b 207d269 bb1c525 58cedc4 207d269 e11bea1 cf49511 c3d14af cf49511 105e936 0dbdb20 bb1c525 23f2de3 bb1c525 23f2de3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 |
import gradio as gr
from src.const import MODEL_CHOICES
from src.example import EXAMPLES
from src.inference import inference
def build_interface():
    """Build the Gradio Blocks UI for the Stable Diffusion demo.

    Lays out a prompt/model-selection column with collapsible advanced
    settings, an output image column, and wires both the Generate button
    and the example gallery to ``inference``.

    Returns:
        gr.Blocks: The assembled (but not yet launched) interface.
    """
    theme = gr.themes.Default(primary_hue=gr.themes.colors.emerald)
    with gr.Blocks(theme=theme) as interface:
        # NOTE: was an f-string with no placeholders (ruff F541) — plain str.
        gr.Markdown("# Stable Diffusion Demo")
        with gr.Row():
            with gr.Column():
                prompt = gr.Text(label="Prompt", placeholder="Enter a prompt here")
                model_id = gr.Dropdown(
                    label="Model ID",
                    choices=MODEL_CHOICES,
                    value="stabilityai/stable-diffusion-3-medium-diffusers",
                )
                # Additional Input Settings
                with gr.Accordion("Additional Settings", open=False):
                    negative_prompt = gr.Text(label="Negative Prompt", value="")
                    with gr.Row():
                        width = gr.Number(label="Width", value=512, step=64, minimum=64, maximum=2048)
                        height = gr.Number(label="Height", value=512, step=64, minimum=64, maximum=2048)
                        num_images = gr.Number(label="Num Images", value=4, minimum=1, maximum=10, step=1)
                        seed = gr.Number(label="Seed", value=8888, step=1)
                    guidance_scale = gr.Slider(label="Guidance Scale", value=7.5, step=0.5, minimum=0, maximum=10)
                    num_inference_step = gr.Slider(
                        label="Num Inference Steps", value=50, minimum=1, maximum=100, step=2
                    )
                    with gr.Row():
                        use_safety_checker = gr.Checkbox(value=True, label="Use Safety Checker")
                        use_model_offload = gr.Checkbox(value=False, label="Use Model Offload")
                with gr.Accordion(label="Notes", open=False):
                    # The component registers itself inside the Blocks context;
                    # no name binding needed (previous `notes = ...` was unused).
                    # language=HTML
                    gr.HTML(
                        """
                        <h2>Negative Embeddings</h2>
                        <p>If you want to use negative embedding, use the following tokens in the prompt.</p>
                        <ul>
                        <li><a href='https://civitai.com/models/59614/badneganatomy-textual-inversion'>BadNegAnatomyV1-neg</a></li>
                        <li><a href='https://civitai.com/models/4629/deep-negative-v1x'>DeepNegativeV1</a> </li>
                        <li><a href='https://civitai.com/models/7808/easynegative'>EasyNegative</a></li>
                        <li><a href='https://civitai.com/models/56519/negativehand-negative-embedding'>negative_hand-neg</a></li>
                        </ul>
                        """
                    )
            with gr.Column():
                output_image = gr.Image(label="Image", type="pil")
        # Order must match the positional signature of `inference`.
        inputs = [
            prompt,
            model_id,
            negative_prompt,
            width,
            height,
            guidance_scale,
            num_inference_step,
            num_images,
            use_safety_checker,
            use_model_offload,
            seed,
        ]
        btn = gr.Button("Generate", variant="primary")
        btn.click(
            fn=inference,
            inputs=inputs,
            outputs=output_image,
        )
        gr.Examples(
            examples=EXAMPLES,
            inputs=inputs,
            outputs=output_image,
            fn=inference,
            # Lazy caching: examples are rendered on first request, not at startup.
            cache_examples='lazy',
        )
    return interface
if __name__ == "__main__":
    # Assemble the UI, enable request queueing, and start serving.
    demo = build_interface()
    demo.queue().launch()
|