import gradio as gr
import spaces
import torch
from PIL import Image
from diffusers import DiffusionPipeline
DIFFUSERS_MODEL_IDS = [
    "stabilityai/stable-diffusion-3-medium-diffusers",
    "stabilityai/stable-diffusion-xl-base-1.0",
    "stabilityai/stable-diffusion-2-1",
    "runwayml/stable-diffusion-v1-5",
]

EXTERNAL_MODEL_URL_MAPPING = {
    "Beautiful Realistic Asians": "https://civitai.com/api/download/models/177164?type=Model&format=SafeTensor&size=full&fp=fp16",
}

MODEL_CHOICES = DIFFUSERS_MODEL_IDS + list(EXTERNAL_MODEL_URL_MAPPING.keys())
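# The external entry above is a single-file .safetensors checkpoint hosted on
# Civitai, which DiffusionPipeline.from_pretrained cannot load by name. Below
# is a minimal sketch of one way such an entry could be handled, assuming an
# SD-1.5-style checkpoint and diffusers' from_single_file loader; the helper
# name and download path are illustrative and not used by the demo itself.
import requests
from diffusers import StableDiffusionPipeline


def load_external_checkpoint(name: str) -> StableDiffusionPipeline:
    url = EXTERNAL_MODEL_URL_MAPPING[name]
    local_path = f"/tmp/{name}.safetensors"  # illustrative download location
    with requests.get(url, stream=True, timeout=60) as r:
        r.raise_for_status()
        with open(local_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=1 << 20):
                f.write(chunk)
    return StableDiffusionPipeline.from_single_file(local_path, torch_dtype=torch.float16)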
# Global Variables
current_model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the default pipeline once at start-up; fall back to float32 on CPU so
# the demo can still run (slowly) without a GPU.
pipe = DiffusionPipeline.from_pretrained(
    current_model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)
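# `spaces` is imported for Hugging Face ZeroGPU Spaces, where the function that
# needs the GPU is typically decorated with @spaces.GPU so a device is attached
# for the duration of each call; the decorator is a no-op on other hardware.
# Applying it here is an assumption about the target hardware, not a requirement.
@spaces.GPU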
def inference(
    model_id: str,
    prompt: str,
    negative_prompt: str = "",
    progress=gr.Progress(track_tqdm=True),
) -> Image.Image:
    progress(0, "Starting inference...")
    global current_model_id, pipe
    # Reload the pipeline only when a different model is selected, so repeated
    # calls with the same model reuse the weights already on the device.
    if model_id != current_model_id:
        try:
            # Note: the EXTERNAL_MODEL_URL_MAPPING choices are display names,
            # not Hub repo ids, so from_pretrained raises for them and the
            # error is surfaced in the UI (see the single-file sketch above).
            pipe = DiffusionPipeline.from_pretrained(
                model_id,
                torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            ).to(device)
            current_model_id = model_id
        except Exception as e:
            raise gr.Error(str(e))
    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
    ).images[0]
    return image
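# The "Additional Settings" controls in the UI below are not wired into the
# click handler yet (the accordion is marked W.I.P). The variant below is only
# a sketch of how they could be forwarded to the pipeline; the function name is
# illustrative and the demo does not call it. The keyword arguments match the
# standard diffusers text-to-image __call__ signature.
def inference_with_settings(
    model_id: str,
    prompt: str,
    negative_prompt: str = "",
    width: float = 512,
    height: float = 512,
    guidance_scale: float = 7.5,
    num_inference_steps: int = 28,
    progress=gr.Progress(track_tqdm=True),
) -> Image.Image:
    progress(0, "Starting inference...")
    global current_model_id, pipe
    if model_id != current_model_id:
        try:
            pipe = DiffusionPipeline.from_pretrained(
                model_id,
                torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            ).to(device)
            current_model_id = model_id
        except Exception as e:
            raise gr.Error(str(e))
    return pipe(
        prompt,
        negative_prompt=negative_prompt,
        width=int(width),  # gr.Number returns floats unless precision=0
        height=int(height),
        guidance_scale=guidance_scale,
        num_inference_steps=int(num_inference_steps),
    ).images[0]
# To use it, the click handler would also take the extra components, e.g.:
# btn.click(fn=inference_with_settings, inputs=inputs + additional_inputs, outputs=outputs)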
if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.Markdown("# Stable Diffusion Demo")
        with gr.Row():
            with gr.Column():
                inputs = [
                    gr.Dropdown(
                        label="Model ID",
                        choices=MODEL_CHOICES,
                        value="stabilityai/stable-diffusion-3-medium-diffusers",
                    ),
                    gr.Text(label="Prompt", value=""),
                    gr.Text(label="Negative Prompt", value=""),
                ]
                with gr.Accordion("Additional Settings (W.I.P)", open=False):
                    with gr.Row():
                        width_component = gr.Number(label="Width", value=512, step=64, minimum=64, maximum=1024)
                        height_component = gr.Number(label="Height", value=512, step=64, minimum=64, maximum=1024)
                    # Not yet passed to the click handler; see the sketch above.
                    additional_inputs = [
                        width_component,
                        height_component,
                        gr.Number(label="Guidance Scale", value=7.5, step=0.5, minimum=0, maximum=10),
                        gr.Slider(label="Num Inference Steps", value=None, minimum=1, maximum=1000, step=1),
                    ]
            with gr.Column():
                outputs = [
                    gr.Image(label="Image", type="pil"),
                ]

        gr.Examples(
            examples=[
                ["stabilityai/stable-diffusion-3-medium-diffusers", "A cat holding a sign that says Hello world", ""],
                ["stabilityai/stable-diffusion-3-medium-diffusers", 'Beautiful pixel art of a Wizard with hovering text "Achievement unlocked: Diffusion models can spell now"', ""],
                ["stabilityai/stable-diffusion-3-medium-diffusers", 'A corgi wearing sunglasses says "U-Net is OVER!!"', ""],
            ],
            inputs=inputs,
        )
        btn = gr.Button("Generate")
        btn.click(fn=inference, inputs=inputs, outputs=outputs)

    demo.queue().launch()