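# Gradio demo comparing Stable Diffusion v1.5 image generation with and
# without torch.compile(), using the `spaces` GPU decorator on a Hugging
# Face Space.
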
import spaces
import gradio as gr
import torch
import os
from diffusers import DiffusionPipeline

# Raise TorchDynamo errors instead of silently falling back to eager mode,
# and keep TorchInductor's compilation progress output enabled.
import torch._dynamo
import torch._inductor.config
torch._dynamo.config.suppress_errors = False
torch._inductor.config.disable_progress = False

# Debug output: dump the environment and installed packages to the Space logs.
print(os.environ)
import subprocess
subprocess.run("pip list", shell=True)
# Shared dtype and device so both pipelines are set up identically.
dtype = torch.float32
device = "cuda" if torch.cuda.is_available() else "cpu"
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

# Pipeline whose UNet is wrapped with torch.compile().
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)
pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True)
pipe.to(device)

# Baseline pipeline left in eager (uncompiled) mode for comparison.
pipe2 = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)
pipe2.to(device)
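
# Note: torch.compile() is lazy, so the first call into `infer` below also pays
# the kernel autotuning/compilation cost; later calls reuse the compiled graph
# (assuming input shapes stay the same).
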
@spaces.GPU(duration=59)
def infer(prompt: str, progress=gr.Progress(track_tqdm=True)):
    # Generate with the torch.compile()-wrapped pipeline.
    image = pipe(
        prompt=prompt,
        output_type="pil",
    ).images[0]
    return image


@spaces.GPU(duration=59)
def infer2(prompt: str, progress=gr.Progress(track_tqdm=True)):
    # Generate with the uncompiled baseline pipeline.
    image = pipe2(
        prompt=prompt,
        output_type="pil",
    ).images[0]
    return image
examples = [
"a tiny astronaut hatching from an egg on the moon",
"a cat holding a sign that says hello world",
"an anime illustration of a wiener schnitzel",
]
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run with torch.compile()", scale=0)
            run_button2 = gr.Button("Run without torch.compile()", scale=0)

        result = gr.Image(label="Result", show_label=False)

        gr.Examples(
            examples=examples,
            #fn=infer,
            inputs=[prompt],
            #outputs=[result, seed],
            #cache_examples=True,
            #cache_mode="lazy"
        )

    # Compiled pipeline: triggered by the first button or by submitting the prompt.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt],
        outputs=[result]
    )
    # Uncompiled baseline pipeline: triggered by the second button.
    run_button2.click(infer2, [prompt], [result])

demo.launch(ssr_mode=False)