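# Assumed runtime dependencies (not listed in the original snippet): gradio,
# torch, diffusers, transformers, safetensors, and accelerate (required by
# enable_model_cpu_offload()).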
import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline

# Cache the pipeline between generations so the model is only loaded once
pipe = None

def load_model():
    global pipe
    if pipe is None:
        model_id = "stabilityai/stable-diffusion-xl-base-1.0"
        pipe = StableDiffusionXLPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            variant="fp16",
            use_safetensors=True,
        )
        if torch.cuda.is_available():
            # Memory optimizations: enable_model_cpu_offload() manages device
            # placement itself, so a separate pipe.to("cuda") is not needed
            pipe.enable_attention_slicing()
            pipe.enable_model_cpu_offload()
    return pipe
def generate_image(prompt, negative_prompt, steps=30, guidance_scale=7.5):
    pipe = load_model()
    # Generate at SDXL's native 1024x1024 resolution
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance_scale,
        width=1024,
        height=1024,
    ).images[0]
    return image
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# 🖼️ SDXL Image Generator (Stable)")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="Describe your image...", max_lines=2)
            negative_prompt = gr.Textbox(label="Negative Prompt", value="low quality, blurry", max_lines=2)
            with gr.Row():
                steps = gr.Slider(10, 50, value=30, label="Steps")
                guidance = gr.Slider(1.0, 15.0, value=7.5, label="Guidance Scale")
            submit = gr.Button("Generate", variant="primary")
        with gr.Column():
            output = gr.Image(label="Result", height=512)

    submit.click(
        generate_image,
        inputs=[prompt, negative_prompt, steps, guidance],
        outputs=output,
    )
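
# Note (assumption, not part of the original app): on a Hugging Face Space,
# enabling Gradio's request queue keeps the UI responsive during long SDXL
# generations, e.g. demo.queue().launch() instead of demo.launch().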
if __name__ == "__main__":
    demo.launch()