lkc290 committed on
Commit
5fc3f91
1 Parent(s): 494b083

Upload app.py

Files changed (1)
  1. app.py +193 -0
app.py ADDED
@@ -0,0 +1,193 @@
+ from threading import Lock
+ import math
+ import os
+ import random
+
+ from diffusers import StableDiffusionPipeline
+ from diffusers.models.attention import get_global_heat_map, clear_heat_maps
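+ # NOTE: get_global_heat_map / clear_heat_maps are not part of upstream
+ # diffusers; this import assumes the patched diffusers build that ships
+ # with the DAAM repository.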
+ from matplotlib import pyplot as plt
+ import gradio as gr
+ import torch
+ import torch.nn.functional as F
+ import spacy
+
+ if not os.environ.get('NO_DOWNLOAD_SPACY'):
+     spacy.cli.download('en_core_web_sm')
+
+ model_id = "CompVis/stable-diffusion-v1-4"
+ device = "cuda"
+
+ gen = torch.Generator(device='cuda')
+ gen.manual_seed(12758672)
+ orig_state = gen.get_state()
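+ # predict() restores this saved state before each generation, so the same
+ # prompt and settings reproduce the same image.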
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True).to(device)
+ lock = Lock()
+ nlp = spacy.load('en_core_web_sm')
+
+
+ def expand_m(m, n: int = 1, o=512, mode='bicubic'):
+     # Average the n accumulated token maps, upsample to o x o, then
+     # min-max normalize to [0, 1]. (The original hardcoded mode='bicubic'
+     # in the interpolate call, leaving the parameter dead; fixed here.)
+     m = m.unsqueeze(0).unsqueeze(0) / n
+     m = F.interpolate(m.float().detach(), size=(o, o), mode=mode, align_corners=False)
+     m = (m - m.min()) / (m.max() - m.min() + 1e-8)
+     m = m.cpu().detach()
+
+     return m
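+ # Usage sketch (shapes are assumptions; DAAM token maps are low resolution):
+ #   expand_m(heat_maps[1])   # (h, w) map -> (1, 1, 512, 512) tensor in [0, 1]
+ #   expand_m(m, n=3)         # mean of three accumulated sub-word token maps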
+
+
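+ # predict() runs the pipeline once under the shared lock, collects DAAM's
+ # global heat map, and builds three matplotlib figures: the generated image,
+ # per-word "soft" heat-map overlays, and "hard" maps that zero out pixels
+ # below threshold * max.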
+ @torch.no_grad()
+ def predict(prompt, inf_steps, threshold):
+     with torch.cuda.amp.autocast(), lock:
+         try:
+             plt.close('all')
+         except Exception:
+             pass
+
+         # Restore the saved generator state for reproducibility and drop any
+         # heat maps left over from the previous run.
+         gen.set_state(orig_state.clone())
+         clear_heat_maps()
+
+         out = pipe(prompt, guidance_scale=7.5, height=512, width=512, do_intermediates=False, generator=gen,
+                    num_inference_steps=int(inf_steps))
+         heat_maps = get_global_heat_map()
+
+     with torch.cuda.amp.autocast(dtype=torch.float32):
+         m = 0
+         n = 0
+         w = ''
+         w_idx = 0
+
+         fig, ax = plt.subplots()
+         ax.imshow(out.images[0].cpu().float().detach().permute(1, 2, 0).numpy())
+         ax.set_xticks([])
+         ax.set_yticks([])
+
+         fig1, axs1 = plt.subplots(math.ceil(len(out.words) / 4), 4)  # , figsize=(20, 20))
+         fig2, axs2 = plt.subplots(math.ceil(len(out.words) / 4), 4)  # , figsize=(20, 20))
+
+         # Accumulate sub-word token maps until a '</w>' marker closes a full
+         # word, then render that word's soft and hard DAAM maps.
+         for idx in range(1, len(out.words) + 1):
+             word = out.words[idx - 1]
+             m += heat_maps[idx]
+             n += 1
+             w += word
+
+             if '</w>' not in word:
+                 continue
+
+             mplot = expand_m(m, n)
+             spotlit_im = out.images[0].cpu().float().detach()
+             w = w.replace('</w>', '')
+             # Append the inverted heat map as an alpha channel to spotlight the word.
+             spotlit_im2 = torch.cat((spotlit_im, (1 - mplot.squeeze(0)).pow(1)), dim=0)
+
+             if len(out.words) <= 4:
+                 a1 = axs1[w_idx % 4]
+                 a2 = axs2[w_idx % 4]
+             else:
+                 a1 = axs1[w_idx // 4, w_idx % 4]
+                 a2 = axs2[w_idx // 4, w_idx % 4]
+
+             a1.set_xticks([])
+             a1.set_yticks([])
+             a1.imshow(mplot.squeeze().numpy(), cmap='jet')
+             a1.imshow(spotlit_im2.permute(1, 2, 0).numpy())
+             a1.set_title(w)
+
+             # Hard map: zero out pixels below threshold * max.
+             mask = torch.ones_like(mplot)
+             mask[mplot < threshold * mplot.max()] = 0
+             im2 = spotlit_im * mask.squeeze(0)
+             a2.set_xticks([])
+             a2.set_yticks([])
+             a2.imshow(im2.permute(1, 2, 0).numpy())
+             a2.set_title(w)
+
+             m = 0
+             n = 0
+             w_idx += 1
+             w = ''
+
+         # Delete the unused subplot axes in the trailing grid cells.
+         for idx in range(w_idx, len(axs1.flatten())):
+             fig1.delaxes(axs1.flatten()[idx])
+             fig2.delaxes(axs2.flatten()[idx])
+
+     return fig, fig1, fig2
+
+
+ # Mirror the dropdown selection into the prompt textbox.
+ def set_prompt(prompt):
+     return prompt
+
+
+ with gr.Blocks() as demo:
+     md = '''# DAAM: Attention Maps for Interpreting Stable Diffusion
+ Check out the paper: [What the DAAM: Interpreting Stable Diffusion Using Cross Attention](http://arxiv.org/abs/2210.04885). Note that, due to server costs, this demo will transition to Hugging Face Spaces on 2022-10-20.
+ '''
+     gr.Markdown(md)
+
+     with gr.Row():
+         with gr.Column():
+             dropdown = gr.Dropdown([
+                 'A monkey wearing a halloween costume',
+                 'A smiling, red cat chewing gum',
+                 # 'Doing research at Comcast Applied AI labs',
+                 # 'Professor Jimmy Lin from the University of Waterloo',
+                 # 'Yann Lecun teaching machine learning on a chalkboard',
+                 # 'A cat eating cake for her birthday',
+                 # 'Steak and dollars on a plate',
+                 # 'A fox, a dog, and a wolf in a field'
+             ], label='Examples', value='An angry, bald man doing research')
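+             # Note: the default value is not among the active choices above,
+             # presumably relying on Gradio not validating it against the list.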
+
+             text = gr.Textbox(label='Prompt', value='An angry, bald man doing research')
+             slider1 = gr.Slider(15, 35, value=25, interactive=True, step=1, label='Inference steps')
+             slider2 = gr.Slider(0, 1.0, value=0.4, interactive=True, step=0.05, label='Threshold (tau)')
+             submit_btn = gr.Button('Submit')
+
+         with gr.Tab('Original Image'):
+             p0 = gr.Plot()
+
+         with gr.Tab('Soft DAAM Maps'):
+             p1 = gr.Plot()
+
+         with gr.Tab('Hard DAAM Maps'):
+             p2 = gr.Plot()
+
+     submit_btn.click(fn=predict, inputs=[text, slider1, slider2], outputs=[p0, p1, p2])
+     dropdown.change(set_prompt, dropdown, text)
+     dropdown.update()
+
+ # ADDED PART: optional helpers for exposing the demo from a Colab notebook
+ # by proxying an unused local port.
+ # import portpicker
+
+ # port = portpicker.pick_unused_port()
+ # select_ip = "0.0.0.0:" + str(port)
+ # print("Port: ", port)
+
+ # from IPython.display import Javascript
+
+ # def show_port(port, height=400):
+ #     display(Javascript("""
+ #     (async ()=>{
+ #         fm = document.createElement('iframe')
+ #         fm.src = await google.colab.kernel.proxyPort(%s)
+ #         fm.width = '95%%'
+ #         fm.height = '%d'
+ #         fm.frameBorder = 0
+ #         document.body.append(fm)
+ #     })();
+ #     """ % (port, height)))
+
+ # get_ipython().system_raw(f'python3 -m http.server {port} &')
+ # show_port(port)
+ ###
+
+ demo.launch(share=True)
+ # demo.launch(server_name='0.0.0.0', server_port=port)