yjernite (HF staff) committed
Commit e118f26
1 Parent(s): 04caed6

supporting code

Files changed (1)
  1. diffusion_bias_utils.py +338 -0
diffusion_bias_utils.py ADDED
@@ -0,0 +1,338 @@
+ from glob import glob
+ from os.path import join as pjoin
+
+ import numpy as np
+ import pandas as pd
+ import plotly.express as px
+ import plotly.graph_objects as go
+ import torch
+ import umap.umap_ as umap
+ from PIL import Image
+ from scipy.cluster.hierarchy import dendrogram, linkage
+ from scipy.spatial.distance import squareform
+ from sklearn.preprocessing import normalize
+ from tqdm import tqdm
+
+
+ ###
+ # Get text embeddings from sentence-transformers model
+ ###
+ def sentence_mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[
+         0
+     ]  # First element of model_output contains all token embeddings
+     input_mask_expanded = (
+         attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     )
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
+         input_mask_expanded.sum(1), min=1e-9
+     )
+
+
+ def compute_text_embeddings(sentences, text_tokenizer, text_model):
+     batch = text_tokenizer(
+         sentences, padding=True, truncation=True, return_tensors="pt"
+     )
+     with torch.no_grad():
+         model_output = text_model(**batch)
+         sentence_embeds = sentence_mean_pooling(model_output, batch["attention_mask"])
+     sentence_embeds /= sentence_embeds.norm(dim=-1, keepdim=True)
+     return sentence_embeds
+
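A minimal usage sketch for these two helpers, assuming the sentence-transformers/all-MiniLM-L6-v2 checkpoint (any sentence-transformers model on the Hub that relies on mean pooling should work the same way; the prompt strings are illustrative):

# Usage sketch (assumed checkpoint: sentence-transformers/all-MiniLM-L6-v2)
from transformers import AutoModel, AutoTokenizer

text_tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
text_model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")

prompts = ["a photo of a doctor", "a photo of a nurse"]
prompt_embeds = compute_text_embeddings(prompts, text_tokenizer, text_model)
# embeddings come back L2-normalized, so cosine similarity is a plain matrix product
similarities = prompt_embeds @ prompt_embeds.T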
+
+ ###
+ # Get image embeddings from BLIP VQA models
+ ###
+ # returns the average pixel embedding from the last layer of the image encoder
+ def get_compute_image_embedding_blip_vqa_pixels(
+     img, blip_processor, blip_model, device="cpu"
+ ):
+     pixel_values = blip_processor(img, "", return_tensors="pt")["pixel_values"].to(
+         device
+     )
+     with torch.no_grad():
+         vision_outputs = blip_model.vision_model(
+             pixel_values=pixel_values,
+             output_hidden_states=True,
+         )
+         image_embeds = vision_outputs[0].sum(dim=1).squeeze()
+         image_embeds /= image_embeds.norm()
+     return image_embeds.detach().cpu().numpy()
+
+
+ # returns the average token embedding from the question encoder (conditioned on the image)
+ # along with the generated answer
+ # adapted from:
+ # https://github.com/huggingface/transformers/blob/2411f0e465e761790879e605a4256f3d4afb7f82/src/transformers/models/blip/modeling_blip.py#L1225
+ def get_compute_image_embedding_blip_vqa_question(
+     img, blip_processor, blip_model, question=None, device="cpu"
+ ):
+     question = "what word best describes this person?" if question is None else question
+     inputs = blip_processor(img, question, return_tensors="pt")
+     with torch.no_grad():
+         # make question embeddings
+         vision_outputs = blip_model.vision_model(
+             pixel_values=inputs["pixel_values"].to(device),
+             output_hidden_states=True,
+         )
+         image_embeds = vision_outputs[0]
+         image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
+         question_embeds = blip_model.text_encoder(
+             input_ids=inputs["input_ids"].to(device),
+             attention_mask=inputs["attention_mask"].to(device),
+             encoder_hidden_states=image_embeds,
+             encoder_attention_mask=image_attention_mask,
+             return_dict=False,
+         )
+         question_embeds = question_embeds[0]
+         # generate outputs
+         question_attention_mask = torch.ones(
+             question_embeds.size()[:-1], dtype=torch.long
+         ).to(question_embeds.device)
+         bos_ids = torch.full(
+             (question_embeds.size(0), 1),
+             fill_value=blip_model.decoder_bos_token_id,
+             device=question_embeds.device,
+         )
+         outputs = blip_model.text_decoder.generate(
+             input_ids=bos_ids,
+             eos_token_id=blip_model.config.text_config.sep_token_id,
+             pad_token_id=blip_model.config.text_config.pad_token_id,
+             encoder_hidden_states=question_embeds,
+             encoder_attention_mask=question_attention_mask,
+             # **generate_kwargs,
+         )
+         answer = blip_processor.decode(outputs[0], skip_special_tokens=True)
+         # average and normalize question embeddings
+         res_question_embeds = question_embeds.sum(dim=1).squeeze()
+         res_question_embeds /= res_question_embeds.norm()
+         res_question_embeds = res_question_embeds.detach().cpu().numpy()
+     return (res_question_embeds, answer)
+
+
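A sketch of how the two BLIP helpers might be called, assuming the Salesforce/blip-vqa-base checkpoint and a locally saved image (the file path is illustrative):

# Usage sketch (assumed checkpoint: Salesforce/blip-vqa-base; image path is illustrative)
from PIL import Image
from transformers import BlipForQuestionAnswering, BlipProcessor

blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
blip_model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

img = Image.open("generated_image.png")
# pooled, L2-normalized embedding of the image pixels
pixel_embed = get_compute_image_embedding_blip_vqa_pixels(img, blip_processor, blip_model)
# question-conditioned embedding plus the generated one-word answer
question_embed, answer = get_compute_image_embedding_blip_vqa_question(
    img, blip_processor, blip_model
)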
+ ###
+ # Plotting utilities: 2D and 3D projection + scatter plots
+ ###
+ def make_2d_plot(embeds, text_list, color_list=None, shape_list=None, umap_spread=10):
+     # default color and shape
+     color_list = [0 for _ in text_list] if color_list is None else color_list
+     shape_list = ["circle" for _ in text_list] if shape_list is None else shape_list
+     # project to 2D
+     fit = umap.UMAP(
+         metric="cosine",
+         n_neighbors=len(embeds) - 1,
+         min_dist=1,
+         n_components=2,
+         spread=umap_spread,
+     )
+     u = fit.fit_transform(embeds)
+     fig = go.Figure()
+     fig.add_trace(
+         go.Scatter(
+             x=u[:, 0].tolist(),
+             y=u[:, 1].tolist(),
+             mode="markers",
+             name="nodes",
+             marker=dict(
+                 symbol=shape_list,
+                 color=color_list,
+             ),
+             text=text_list,
+             hoverinfo="text",
+             marker_line_color="midnightblue",
+             marker_line_width=2,
+             marker_size=10,
+             opacity=0.8,
+         )
+     )
+     fig.update_yaxes(
+         scaleanchor="x",
+         scaleratio=1,
+     )
+     fig.update_layout(
+         autosize=False,
+         width=800,
+         height=800,
+     )
+     fig.layout.showlegend = False
+     return fig
+
+
+ def make_3d_plot(embeds, text_list, color_list=None, shape_list=None, umap_spread=10):
+     # default color and shape
+     color_list = [0 for _ in text_list] if color_list is None else color_list
+     shape_list = ["circle" for _ in text_list] if shape_list is None else shape_list
+     # project to 3D
+     fit = umap.UMAP(
+         metric="cosine",
+         n_neighbors=len(embeds) - 1,
+         min_dist=1,
+         n_components=3,
+         spread=umap_spread,
+     )
+     u = fit.fit_transform(embeds)
+     # make nodes
+     df = pd.DataFrame(
+         {
+             "x": u[:, 0].tolist(),
+             "y": u[:, 1].tolist(),
+             "z": u[:, 2].tolist(),
+             "color": color_list,
+             "hover": text_list,
+             "symbol": shape_list,
+             "size": [5 for _ in text_list],
+         }
+     )
+     fig = px.scatter_3d(
+         df,
+         x="x",
+         y="y",
+         z="z",
+         color="color",
+         symbol="symbol",
+         size="size",
+         hover_data={
+             "hover": True,
+             "x": False,
+             "y": False,
+             "z": False,
+             "color": False,
+             "symbol": False,
+             "size": False,
+         },
+     )
+     fig.layout.showlegend = False
+     return fig
+
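Both projection helpers expect one embedding per row; a toy sketch with random unit vectors (the data here is synthetic, purely to show the call signature):

# Usage sketch with synthetic data: 20 random unit vectors projected to 2D
import numpy as np

rng = np.random.default_rng(0)
toy_embeds = rng.normal(size=(20, 64))
toy_embeds /= np.linalg.norm(toy_embeds, axis=1, keepdims=True)
fig = make_2d_plot(
    toy_embeds,
    text_list=[f"item {i}" for i in range(20)],
    color_list=[i % 4 for i in range(20)],
)
fig.show()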
+
+ ###
+ # Plotting utilities: cluster re-ordering and heatmaps
+ ###
+ ### Some utility functions to get the similarities between two lists of arrays
+ # average pairwise similarity
+ def sim_pairwise_avg(vecs_1, vecs_2):
+     res = np.matmul(np.array(vecs_1), np.array(vecs_2).transpose()).mean()
+     return res
+
+
+ # cosine similarity between (normalized) centroids
+ def sim_centroids(vecs_1, vecs_2):
+     res = np.dot(
+         normalize(np.array(vecs_1).mean(axis=0, keepdims=True), norm="l2")[0],
+         normalize(np.array(vecs_2).mean(axis=0, keepdims=True), norm="l2")[0],
+     )
+     return res
+
+
+ # similarity to the nearest neighbor/exemplar
+ def sim_pairwise_examplar(vecs_1, vecs_2):
+     res = np.matmul(np.array(vecs_1), np.array(vecs_2).transpose()).max()
+     return res
+
+
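All three functions take two groups of already-normalized vectors (lists or 2D arrays, one row per vector); a toy comparison on synthetic data, only to illustrate the shapes involved:

# Toy sketch: compare two groups of 16-dimensional unit vectors
group_a = normalize(np.random.default_rng(0).normal(size=(5, 16)), norm="l2")
group_b = normalize(np.random.default_rng(1).normal(size=(7, 16)), norm="l2")
print(sim_pairwise_avg(group_a, group_b))       # mean over all 5 x 7 cosine similarities
print(sim_centroids(group_a, group_b))          # cosine similarity of the two group centroids
print(sim_pairwise_examplar(group_a, group_b))  # similarity of the best-matching pair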
+ # To make pretty heatmaps, similar rows need to be close to each other;
+ # we achieve that by computing a hierarchical clustering of the points,
+ # then ordering the items as the leaves of a dendrogram
+ def get_cluster_order(similarity_matrix, label_names=None):
+     label_names = (
+         ["" for _ in range(similarity_matrix.shape[0])]
+         if label_names is None
+         else label_names
+     )
+     dissimilarity = 1 - similarity_matrix
+     np.fill_diagonal(dissimilarity, 0.0)
+     # checks=False because the symmetry checks can fail after torch-to-numpy conversion
+     Z = linkage(squareform(dissimilarity, checks=False), "average")
+     # no_plot=True is required when calling dendrogram inside a function (jupyter/matplotlib issue)
+     ddgr = dendrogram(
+         Z, labels=label_names, orientation="top", leaf_rotation=90, no_plot=True
+     )
+     cluster_order = ddgr["leaves"]
+     return cluster_order
+
+
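For instance, on a small hand-written similarity matrix (values made up) the returned leaf order should place the two strongly similar pairs next to each other:

# Toy sketch: reorder a 4 x 4 similarity matrix so similar rows end up adjacent
toy_sim = np.array(
    [
        [1.0, 0.2, 0.9, 0.1],
        [0.2, 1.0, 0.3, 0.8],
        [0.9, 0.3, 1.0, 0.2],
        [0.1, 0.8, 0.2, 1.0],
    ]
)
order = get_cluster_order(toy_sim, label_names=["a", "b", "c", "d"])
toy_sim_reordered = toy_sim[np.ix_(order, order)]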
+ # then make heat map from similarity matrix
+ def make_heat_map(sim_matrix, labels_x, labels_y, scale=25):
+     fig = go.Figure(
+         data=go.Heatmap(z=sim_matrix, x=labels_x, y=labels_y, hoverongaps=False)
+     )
+     fig.update_yaxes(
+         scaleanchor="x",
+         scaleratio=1,
+     )
+     fig.update_layout(
+         autosize=False,
+         width=scale * len(labels_x),
+         height=scale * len(labels_y),
+     )
+     fig.layout.showlegend = False
+     return fig
+
+
+ # bring things together for a square heatmap
+ def build_heat_map_square(
+     img_list, embed_field, sim_fun, label_list, row_order=None, hm_scale=20
+ ):
+     sim_mat = np.zeros((len(img_list), len(img_list)))
+     for i, dct_i in enumerate(img_list):
+         for j, dct_j in enumerate(img_list):
+             sim_mat[i, j] = sim_fun(dct_i[embed_field], dct_j[embed_field])
+     # optionally reorder labels and similarity matrix to be prettier
+     if row_order is None:
+         row_order = get_cluster_order(sim_mat)
+     labels_sorted = [label_list[i] for i in row_order]
+     sim_mat_sorted = sim_mat[np.ix_(row_order, row_order)]
+     # make heatmap from similarity matrix
+     heatmap_fig = make_heat_map(
+         sim_mat_sorted, labels_sorted, labels_sorted, scale=hm_scale
+     )
+     return heatmap_fig
+
+
+ # bring things together for a rectangle heatmap: across lists
+ def build_heat_map_rect(
+     img_list_rows,
+     img_list_cols,
+     label_list_rows,
+     label_list_cols,
+     embed_field,
+     sim_fun,
+     center=False,
+     temperature=5,
+     hm_scale=20,
+ ):
+     sim_mat = np.zeros((len(img_list_rows), len(img_list_cols)))
+     for i, dct_i in enumerate(img_list_rows):
+         for j, dct_j in enumerate(img_list_cols):
+             sim_mat[i, j] = sim_fun(dct_i[embed_field], dct_j[embed_field])
+     # softmax-normalize each row and optionally subtract the column-wise mean
+     sim_mat_exp = np.exp(temperature * sim_mat)
+     sim_mat_exp /= sim_mat_exp.sum(axis=1, keepdims=1)
+     if center:
+         sim_mat_exp_avg = sim_mat_exp.mean(axis=0, keepdims=1)
+         sim_mat_exp -= sim_mat_exp_avg
+         sim_mat_exp_avg = sim_mat_exp_avg * sim_mat_exp.max() / sim_mat_exp_avg.max()
+     # rows are reordered by the norm of their similarity profile
+     sim_mat_norm = np.sum(sim_mat_exp * sim_mat_exp, axis=1)
+     row_order = np.argsort(sim_mat_norm, axis=-1)
+     row_labels_sorted = [label_list_rows[i] for i in row_order]
+     if center:
+         # columns are ordered by bias
+         col_order = np.argsort(sim_mat_exp_avg.sum(axis=0), axis=-1)
+     else:
+         # columns are reordered by similarity
+         sim_mat_exp_norm = normalize(sim_mat_exp, axis=0, norm="l2")
+         cluster_cols_sim_mat = np.matmul(sim_mat_exp_norm.transpose(), sim_mat_exp_norm)
+         col_order = get_cluster_order(cluster_cols_sim_mat)
+     col_labels_sorted = [label_list_cols[i] for i in col_order]
+     # make heatmap from similarity matrix
+     if center:
+         row_order = list(row_order) + [len(row_order), len(row_order) + 1]
+         row_labels_sorted = row_labels_sorted + ["_", "AVERAGE"]
+         sim_mat_exp = np.concatenate(
+             [sim_mat_exp, np.zeros_like(sim_mat_exp_avg), sim_mat_exp_avg], axis=0
+         )
+     sim_mat_exp_sorted = sim_mat_exp[np.ix_(row_order, col_order)]
+     heatmap_fig = make_heat_map(
+         sim_mat_exp_sorted, col_labels_sorted, row_labels_sorted, scale=hm_scale
+     )
+     return heatmap_fig
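Finally, a sketch of how the pieces above might be chained for a square heatmap, assuming a directory of generated images grouped into one sub-folder per prompt and reusing the blip_processor / blip_model objects from the BLIP sketch above (the folder layout, the "blip_pixel_embeds" field, and the img_list variable are illustrative names, not fixed by this file):

# Usage sketch: square heatmap of group-to-group similarities
# (folder layout and dict field names are illustrative assumptions)
img_list = []
for prompt_dir in sorted(glob(pjoin("generated_images", "*"))):
    embeds = [
        get_compute_image_embedding_blip_vqa_pixels(
            Image.open(path), blip_processor, blip_model
        )
        for path in sorted(glob(pjoin(prompt_dir, "*.png")))
    ]
    img_list.append({"prompt": prompt_dir, "blip_pixel_embeds": embeds})

fig = build_heat_map_square(
    img_list,
    embed_field="blip_pixel_embeds",
    sim_fun=sim_centroids,
    label_list=[dct["prompt"] for dct in img_list],
)
fig.show()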