|
import gradio as gr |
|
from diffusers import DiffusionPipeline |
|
import torch |
|
|
|
# SDXL base checkpoint, loaded in half precision from safetensors weights.
_MODEL_ID = "stabilityai/stable-diffusion-xl-base-1.0"

pipe = DiffusionPipeline.from_pretrained(
    _MODEL_ID,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)
# Offload idle submodules to CPU so the pipeline fits on smaller GPUs.
pipe.enable_model_cpu_offload()
|
|
|
def infer(prompt):
    """Generate a single image for *prompt* using the SDXL pipeline.

    Parameters
    ----------
    prompt : str
        Text description of the image to generate (taken from the UI).

    Returns
    -------
    PIL.Image.Image
        The first generated image.
    """
    # Bug fix: the original body overwrote the user-supplied prompt with a
    # hard-coded demo string, so the textbox input was silently ignored.
    # NOTE(review): num_inference_steps=1 / guidance_scale=0.0 are
    # turbo-style settings — presumably intentional; confirm they suit
    # the base SDXL checkpoint loaded above.
    image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
    return image
|
|
|
# --- UI layout ---------------------------------------------------------------
with gr.Blocks() as demo:
    with gr.Column():
        # Prompt entry and the trigger button share one row.
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        # Generated image is rendered below the input row.
        result = gr.Image(label="Result", show_label=False)

    # Clicking the button runs inference on the current prompt text.
    run_button.click(fn=infer, inputs=[prompt], outputs=[result])

# Queue requests so concurrent users are served in order, then start the app.
demo.queue().launch()