import gradio as gr
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import torch


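# Build the Stable Diffusion 2.1 pipeline: load the pretrained weights, swap
# the default scheduler for DPM-Solver multistep, and keep the model on the
# CPU in float32.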
def load_model():
    try:
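        # Download/load the pretrained weights. float32 is used since half
        # precision is generally not usable on CPU, and safety_checker=None
        # disables the built-in NSFW filter.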
        pipeline = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1",
            torch_dtype=torch.float32,
            safety_checker=None
        )
    except Exception as e:
        print(f"Error loading the model: {e}")
        raise

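    # Replace the default scheduler with DPM-Solver multistep, which reaches
    # comparable quality in fewer denoising steps.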
    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)

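    # Inference runs on the CPU here; with a CUDA GPU available, the pipeline
    # could instead be loaded in float16 and moved with .to("cuda").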
    try:
        pipeline = pipeline.to("cpu")
    except Exception as e:
        print(f"Error moving the model to device: {e}")
        raise

    return pipeline


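# Load the pipeline once at start-up so every request reuses the same weights.
# If loading fails, `model` stays undefined and the failure resurfaces as an
# error in generate().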
try:
    model = load_model()
except Exception as e:
    print(f"Error initializing the model: {e}")


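# Run text-to-image generation and return the first image of the batch.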
def generate(prompt, guidance_scale=7.5, num_inference_steps=50):
    try:
        images = model(prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images
        return images[0]
    except Exception as e:
        # Raise a Gradio error so the message appears in the UI; returning a
        # plain string would be misread by the Image output component.
        raise gr.Error(f"Error generating image: {e}")


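# Minimal Gradio interface: a prompt box, sliders for the sampling parameters,
# a generate button, and an image output.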
with gr.Blocks() as demo:
    with gr.Row():
        prompt = gr.Textbox(label="Enter your prompt")
        guidance_scale = gr.Slider(1.0, 10.0, value=7.5, label="Guidance Scale")
        steps = gr.Slider(10, 100, value=50, step=1, label="Number of Inference Steps")
    with gr.Row():
        submit = gr.Button("Generate")
    with gr.Row():
        output = gr.Image()

    submit.click(generate, inputs=[prompt, guidance_scale, steps], outputs=output)

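# launch() starts the local server; share=True can be passed to expose a
# temporary public link.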
demo.launch()