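# Gradio demo for a Hugging Face ZeroGPU Space: runs Stable Diffusion v1.5
# through two pipelines, one with the UNet compiled via torch.compile and one
# left in eager mode, so both can be compared from the same prompt box.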
import spaces
import gradio as gr
import torch
import os
from diffusers import DiffusionPipeline
import torch._dynamo
# Surface torch.compile errors instead of silently falling back to eager,
# and keep inductor's compilation progress output visible.
torch._dynamo.config.suppress_errors = False
torch._inductor.config.disable_progress = False

# Log the environment and installed packages for debugging the Space runtime.
print(os.environ)
import subprocess
subprocess.run("pip list", shell=True)

dtype = torch.float32
device = "cuda" if torch.cuda.is_available() else "cpu"
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

# Pipeline whose UNet is wrapped with torch.compile (max-autotune, full graph).
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)
pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True)
pipe.to(device)

# Baseline pipeline left in eager mode for comparison.
pipe2 = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)
pipe2.to(device)

@spaces.GPU(duration=59)
def infer(prompt: str, progress=gr.Progress(track_tqdm=True)):
    # Generate with the compiled pipeline.
    image = pipe(
        prompt=prompt,
        output_type="pil",
    ).images[0]
    return image

@spaces.GPU(duration=59)
def infer2(prompt: str, progress=gr.Progress(track_tqdm=True)):
    # Generate with the uncompiled (eager) pipeline.
    image = pipe2(
        prompt=prompt,
        output_type="pil",
    ).images[0]
    return image

examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cat holding a sign that says hello world",
    "an anime illustration of a wiener schnitzel",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

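# Two-button layout: the same prompt can be run through either pipeline.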
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        
        with gr.Row():
            
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            
            run_button = gr.Button("Run with torch.compile()", scale=0)
            run_button2 = gr.Button("Run without torch.compile()", scale=0)
        
        result = gr.Image(label="Result", show_label=False)
        
        gr.Examples(
            examples=examples,
            #fn=infer,
            inputs=[prompt],
            #outputs=[result, seed],
            #cache_examples=True,
            #cache_mode="lazy"
        )

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt],
        outputs=[result]
    )
    run_button2.click(infer2, [prompt], [result])

demo.launch(ssr_mode=False)