import gradio as gr
import torch

from diffusers import StableDiffusionPipeline
from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
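
# Gradio demo for FreeU: for a given prompt and Stable Diffusion checkpoint,
# the app renders a vanilla SD image and a FreeU image side by side. FreeU
# re-weights the UNet decoder's backbone features (b1, b2) and skip features
# (s1, s2) at sampling time, with no additional training.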


# Load all three Stable Diffusion checkpoints up front so the demo can switch
# between them without reloading weights on every request.
model_id = "CompVis/stable-diffusion-v1-4"
pip_1_4 = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pip_1_4 = pip_1_4.to("cuda")

model_id = "runwayml/stable-diffusion-v1-5"
pip_1_5 = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pip_1_5 = pip_1_5.to("cuda")

model_id = "stabilityai/stable-diffusion-2-1"
pip_2_1 = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pip_2_1 = pip_2_1.to("cuda")

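# Cache the previous inputs and the baseline image so that changing only the
# FreeU sliders reuses the cached vanilla SD result instead of re-generating it.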
prompt_prev = None
sd_options_prev = None
seed_prev = None 
sd_image_prev = None

def infer(prompt, sd_options, seed, b1, b2, s1, s2):
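    """Generate a (baseline SD image, FreeU image) pair for the given prompt.

    The selected pipeline is run with all FreeU factors set to 1.0 to produce
    (or reuse) the plain Stable Diffusion baseline, then re-run with the
    user-supplied backbone factors (b1, b2) and skip factors (s1, s2).
    """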
    global prompt_prev
    global sd_options_prev
    global seed_prev
    global sd_image_prev

    if sd_options == 'SD1.5':
        pip = pip_1_5
    elif sd_options == 'SD2.1':
        pip = pip_2_1
    else:
        pip = pip_1_4

    run_baseline = False
    if prompt != prompt_prev or sd_options != sd_options_prev or seed != seed_prev:
        run_baseline = True
        prompt_prev = prompt
        sd_options_prev = sd_options
        seed_prev = seed

    if run_baseline:
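        # Reset all FreeU factors to 1.0 so this pass is plain Stable Diffusion.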
        register_free_upblock2d(pip, b1=1.0, b2=1.0, s1=1.0, s2=1.0)
        register_free_crossattn_upblock2d(pip, b1=1.0, b2=1.0, s1=1.0, s2=1.0)
       
        torch.manual_seed(seed)
        print("Generating SD:")
        sd_image = pip(prompt, num_inference_steps=25).images[0]  
        sd_image_prev = sd_image
    else:
        sd_image = sd_image_prev

    
    # Apply the user-supplied FreeU factors for the FreeU pass.
    register_free_upblock2d(pip, b1=b1, b2=b2, s1=s1, s2=s2)
    register_free_crossattn_upblock2d(pip, b1=b1, b2=b2, s1=s1, s2=s2)

    torch.manual_seed(seed)
    print("Generating FreeU:")
    freeu_image = pip(prompt, num_inference_steps=25).images[0]  

    # First SD, then freeu
    images = [sd_image, freeu_image]

    return images


examples = [
    [
        "A small cabin on top of a snowy mountain in the style of Disney, artstation",
    ],
    [
        "a monkey doing yoga on the beach",
    ],
    [
        "half human half cat, a human cat hybrid",
    ],
    [
        "a hedgehog using a calculator",
    ],
    [
        "kanye west | diffuse lighting | fantasy | intricate elegant highly detailed lifelike photorealistic digital painting | artstation",
    ],
    [
        "astronaut pig",
    ],
    [
        "two people shouting at each other",
    ],
    [
        "A linked in profile picture of Elon Musk",
    ],
    [
        "A man looking out of a rainy window",
    ],
    [
        "close up, iron man, eating breakfast in a cabin, symmetrical balance, hyper-realistic --ar 16:9 --style raw"
    ],
    [
        'A high tech solarpunk utopia in the Amazon rainforest',
    ],
    [
        'A pikachu fine dining with a view to the Eiffel Tower',
    ],
    [
        'A mecha robot in a favela in expressionist style',
    ],
    [
        'an insect robot preparing a delicious meal',
    ],
]
    
    
css = """
h1 {
  text-align: center;
}

#component-0 {
  max-width: 730px;
  margin: auto;
}
"""

block = gr.Blocks(css='style.css')

options = ['SD1.4', 'SD1.5', 'SD2.1']

with block:
    gr.Markdown("SD vs. FreeU.")
    with gr.Group():
        with gr.Row(): 
            sd_options = gr.Dropdown(options, value="SD1.4", label="SD options")

            with gr.Row():
                with gr.Column():
                    text = gr.Textbox(
                        label="Enter your prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt",
                        container=False,
                    )
                btn = gr.Button("Generate image", scale=0)
                
                seed = gr.Slider(label='seed', minimum=0, maximum=1000, step=1, value=42)

    
    with gr.Group():
        with gr.Row():
            with gr.Accordion('FreeU Parameters: b', open=True):
                b1 = gr.Slider(label='b1: backbone factor of the first decoder stage',
                               minimum=1, maximum=1.6, step=0.01, value=1)
                b2 = gr.Slider(label='b2: backbone factor of the second decoder stage',
                               minimum=1, maximum=1.6, step=0.01, value=1)
            with gr.Accordion('FreeU Parameters: s', open=True):
                s1 = gr.Slider(label='s1: skip factor of the first decoder stage',
                               minimum=0, maximum=1, step=0.1, value=1)
                s2 = gr.Slider(label='s2: skip factor of the second decoder stage',
                               minimum=0, maximum=1, step=0.1, value=1)
                    
    with gr.Row():
        with gr.Group():
            with gr.Row():
                with gr.Column() as c1:
                    image_1 = gr.Image(interactive=False)
                    image_1_label = gr.Markdown("SD")
            
        with gr.Group():
            with gr.Row():
                with gr.Column() as c2:
                    image_2 = gr.Image(interactive=False)
                    image_2_label = gr.Markdown("FreeU")
        
        
    ex = gr.Examples(examples=examples, fn=infer, inputs=[text, sd_options, seed, b1, b2, s1, s2], outputs=[image_1, image_2], cache_examples=False)
    ex.dataset.headers = [""]

    text.submit(infer, inputs=[text, sd_options, seed, b1, b2, s1, s2], outputs=[image_1, image_2])
    btn.click(infer, inputs=[text, sd_options, seed, b1, b2, s1, s2], outputs=[image_1, image_2])

block.launch()
# block.queue(default_enabled=False).launch(share=False)