from glob import glob
from os.path import join as pjoin

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import torch
import umap.umap_ as umap
from PIL import Image
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import squareform
from sklearn.preprocessing import normalize
from tqdm import tqdm


def sentence_mean_pooling(model_output, attention_mask):
    """Average the token embeddings, ignoring padding positions."""
    token_embeddings = model_output[0]  # first element: per-token hidden states
    input_mask_expanded = (
        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    )
    # Sum of the unmasked token vectors divided by the (clamped) token count.
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
        input_mask_expanded.sum(1), min=1e-9
    )


def compute_text_embeddings(sentences, text_tokenizer, text_model):
    """Embed a batch of sentences and L2-normalize each row."""
    batch = text_tokenizer(
        sentences, padding=True, truncation=True, return_tensors="pt"
    )
    with torch.no_grad():
        model_output = text_model(**batch)
        sentence_embeds = sentence_mean_pooling(model_output, batch["attention_mask"])
        sentence_embeds /= sentence_embeds.norm(dim=-1, keepdim=True)
    return sentence_embeds

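# A minimal usage sketch for the two text helpers above. The MiniLM checkpoint
# name and the demo function are assumptions chosen for illustration; any
# Hugging Face encoder whose tokenizer returns an attention mask works the
# same way.
def _demo_compute_text_embeddings():
    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
    model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
    embeds = compute_text_embeddings(
        ["a photo of a doctor", "a photo of a nurse"], tokenizer, model
    )
    # Rows are unit-norm, so a dot product is a cosine similarity.
    print(embeds.shape, (embeds[0] @ embeds[1]).item())
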
def get_compute_image_embedding_blip_vqa_pixels(
    img, blip_processor, blip_model, device="cpu"
):
    """Embed an image with BLIP's vision tower alone (no question text)."""
    pixel_values = blip_processor(img, "", return_tensors="pt")["pixel_values"].to(
        device
    )
    with torch.no_grad():
        vision_outputs = blip_model.vision_model(
            pixel_values=pixel_values,
            output_hidden_states=True,
        )
    # Sum-pool the patch tokens, then L2-normalize the pooled vector.
    image_embeds = vision_outputs[0].sum(dim=1).squeeze()
    image_embeds /= image_embeds.norm()
    return image_embeds.detach().cpu().numpy()

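# A sketch of how the pixel-based embedding above might be called. The BLIP VQA
# checkpoint name and the image path are illustrative assumptions.
def _demo_blip_pixel_embedding():
    from transformers import BlipForQuestionAnswering, BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
    model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
    img = Image.open("example.jpg")  # hypothetical local image
    embed = get_compute_image_embedding_blip_vqa_pixels(img, processor, model)
    print(embed.shape)  # 1-D unit-norm numpy vector of size hidden_size
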
def get_compute_image_embedding_blip_vqa_question(
    img, blip_processor, blip_model, question=None, device="cpu"
):
    """Embed an image through BLIP's question encoder and decode an answer.

    Returns a tuple: (unit-norm question-conditioned embedding, answer string).
    """
    question = "what word best describes this person?" if question is None else question
    inputs = blip_processor(img, question, return_tensors="pt")
    with torch.no_grad():
        vision_outputs = blip_model.vision_model(
            pixel_values=inputs["pixel_values"].to(device),
            output_hidden_states=True,
        )
        image_embeds = vision_outputs[0]
        # The mask must live on the same device as the image embeddings.
        image_attention_mask = torch.ones(
            image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device
        )
        # Cross-attend the question tokens over the image patches.
        question_embeds = blip_model.text_encoder(
            input_ids=inputs["input_ids"].to(device),
            attention_mask=inputs["attention_mask"].to(device),
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
            return_dict=False,
        )
        question_embeds = question_embeds[0]

        question_attention_mask = torch.ones(
            question_embeds.size()[:-1], dtype=torch.long
        ).to(question_embeds.device)
        bos_ids = torch.full(
            (question_embeds.size(0), 1),
            fill_value=blip_model.decoder_bos_token_id,
            device=question_embeds.device,
        )
        # Decode a short free-text answer conditioned on the fused states.
        outputs = blip_model.text_decoder.generate(
            input_ids=bos_ids,
            eos_token_id=blip_model.config.text_config.sep_token_id,
            pad_token_id=blip_model.config.text_config.pad_token_id,
            encoder_hidden_states=question_embeds,
            encoder_attention_mask=question_attention_mask,
        )
    answer = blip_processor.decode(outputs[0], skip_special_tokens=True)

    # Sum-pool the question-conditioned token states into one unit vector.
    res_question_embeds = question_embeds.sum(dim=1).squeeze()
    res_question_embeds /= res_question_embeds.norm()
    res_question_embeds = res_question_embeds.detach().cpu().numpy()
    return (res_question_embeds, answer)

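# Same setup as the pixel demo, but routing the image through the VQA text
# encoder with the default probe question; returns both the fused embedding
# and BLIP's decoded answer. Checkpoint and image path are again assumptions.
def _demo_blip_question_embedding():
    from transformers import BlipForQuestionAnswering, BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
    model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
    img = Image.open("example.jpg")  # hypothetical local image
    embed, answer = get_compute_image_embedding_blip_vqa_question(
        img, processor, model
    )
    print(embed.shape, answer)
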
def make_2d_plot(embeds, text_list, color_list=None, shape_list=None, umap_spread=10):
    """Project embeddings to 2-D with UMAP and return a Plotly scatter figure."""
    color_list = [0 for _ in text_list] if color_list is None else color_list
    shape_list = ["circle" for _ in text_list] if shape_list is None else shape_list

    fit = umap.UMAP(
        metric="cosine",
        n_neighbors=len(embeds) - 1,
        min_dist=1,
        n_components=2,
        spread=umap_spread,
    )
    u = fit.fit_transform(embeds)
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            x=u[:, 0].tolist(),
            y=u[:, 1].tolist(),
            mode="markers",
            name="nodes",
            marker=dict(symbol=shape_list, color=color_list),
            text=text_list,
            hoverinfo="text",
            marker_line_color="midnightblue",
            marker_line_width=2,
            marker_size=10,
            opacity=0.8,
        )
    )
    # Keep both axes on the same scale so distances read correctly.
    fig.update_yaxes(scaleanchor="x", scaleratio=1)
    fig.update_layout(autosize=False, width=800, height=800)
    fig.layout.showlegend = False
    return fig


def make_3d_plot(embeds, text_list, color_list=None, shape_list=None, umap_spread=10):
    """Project embeddings to 3-D with UMAP and return a Plotly scatter figure."""
    color_list = [0 for _ in text_list] if color_list is None else color_list
    shape_list = ["circle" for _ in text_list] if shape_list is None else shape_list

    fit = umap.UMAP(
        metric="cosine",
        n_neighbors=len(embeds) - 1,
        min_dist=1,
        n_components=3,
        spread=umap_spread,
    )
    u = fit.fit_transform(embeds)

    df = pd.DataFrame(
        {
            "x": u[:, 0].tolist(),
            "y": u[:, 1].tolist(),
            "z": u[:, 2].tolist(),
            "color": color_list,
            "hover": text_list,
            "symbol": shape_list,
            "size": [5 for _ in text_list],
        }
    )
    # Show only the hover text, not the raw coordinates or styling columns.
    fig = px.scatter_3d(
        df,
        x="x",
        y="y",
        z="z",
        color="color",
        symbol="symbol",
        size="size",
        hover_data={
            "hover": True,
            "x": False,
            "y": False,
            "z": False,
            "color": False,
            "symbol": False,
            "size": False,
        },
    )
    fig.layout.showlegend = False
    return fig

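# A sketch that feeds a few random unit vectors through both plot helpers.
# Real callers would pass model embeddings; the random data and the helper
# name are assumptions that only make the example self-contained.
def _demo_umap_plots():
    rng = np.random.RandomState(0)
    embeds = normalize(rng.randn(12, 16), norm="l2")
    labels = [f"point {i}" for i in range(12)]
    make_2d_plot(embeds, labels).show()
    make_3d_plot(embeds, labels).show()
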
def sim_pairwise_avg(vecs_1, vecs_2):
    """Mean cosine similarity over all cross pairs (expects unit vectors)."""
    return np.matmul(np.array(vecs_1), np.array(vecs_2).transpose()).mean()


def sim_centroids(vecs_1, vecs_2):
    """Cosine similarity between the re-normalized centroids of two groups."""
    return np.dot(
        normalize(np.array(vecs_1).mean(axis=0, keepdims=True), norm="l2")[0],
        normalize(np.array(vecs_2).mean(axis=0, keepdims=True), norm="l2")[0],
    )


def sim_pairwise_exemplar(vecs_1, vecs_2):
    """Maximum cosine similarity over all cross pairs (best-matching exemplar)."""
    return np.matmul(np.array(vecs_1), np.array(vecs_2).transpose()).max()


# Backwards-compatible alias for the original misspelled name.
sim_pairwise_examplar = sim_pairwise_exemplar

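# A tiny numeric illustration of how the three similarity notions above differ
# on the same two groups of unit vectors (random data, for illustration only).
def _demo_similarity_measures():
    rng = np.random.RandomState(0)
    group_a = normalize(rng.randn(4, 8), norm="l2")
    group_b = normalize(rng.randn(3, 8), norm="l2")
    print("average pair:", sim_pairwise_avg(group_a, group_b))
    print("centroids:   ", sim_centroids(group_a, group_b))
    print("best pair:   ", sim_pairwise_exemplar(group_a, group_b))
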
def get_cluster_order(similarity_matrix, label_names=None):
    """Return the dendrogram leaf order from average-linkage clustering."""
    label_names = (
        ["" for _ in range(similarity_matrix.shape[0])]
        if label_names is None
        else label_names
    )
    dissimilarity = 1 - similarity_matrix
    np.fill_diagonal(dissimilarity, 0.0)

    # squareform condenses the matrix; checks=False tolerates small asymmetries.
    Z = linkage(squareform(dissimilarity, checks=False), "average")

    ddgr = dendrogram(
        Z, labels=label_names, orientation="top", leaf_rotation=90, no_plot=True
    )
    return ddgr["leaves"]

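# Sketch: recover a leaf order for six random unit vectors from their cosine
# self-similarity matrix. Data and labels are illustrative.
def _demo_cluster_order():
    rng = np.random.RandomState(0)
    vecs = normalize(rng.randn(6, 8), norm="l2")
    sim = vecs @ vecs.T  # symmetric cosine-similarity matrix, ones on diagonal
    print(get_cluster_order(sim, label_names=[f"v{i}" for i in range(6)]))
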
def make_heat_map(sim_matrix, labels_x, labels_y, scale=25):
    """Render a similarity matrix as a square-celled Plotly heat map."""
    fig = go.Figure(
        data=go.Heatmap(z=sim_matrix, x=labels_x, y=labels_y, hoverongaps=False)
    )
    fig.update_yaxes(scaleanchor="x", scaleratio=1)
    fig.update_layout(
        autosize=False,
        width=scale * len(labels_x),
        height=scale * len(labels_y),
    )
    fig.layout.showlegend = False
    return fig


def build_heat_map_square(
    img_list, embed_field, sim_fun, label_list, row_order=None, hm_scale=20
):
    """Self-similarity heat map for one list of records, cluster-ordered."""
    sim_mat = np.zeros((len(img_list), len(img_list)))
    for i, dct_i in enumerate(img_list):
        for j, dct_j in enumerate(img_list):
            sim_mat[i, j] = sim_fun(dct_i[embed_field], dct_j[embed_field])

    # Reorder rows and columns together so clustered items sit side by side.
    if row_order is None:
        row_order = get_cluster_order(sim_mat)
    labels_sorted = [label_list[i] for i in row_order]
    sim_mat_sorted = sim_mat[np.ix_(row_order, row_order)]

    return make_heat_map(sim_mat_sorted, labels_sorted, labels_sorted, scale=hm_scale)

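# Sketch: build a clustered self-similarity heat map from records shaped like
# the ones this module expects (each dict holds a bank of unit vectors under
# an embedding field). The field name, labels, and data are assumptions.
def _demo_heat_map_square():
    rng = np.random.RandomState(0)
    img_list = [{"embed": normalize(rng.randn(3, 8), norm="l2")} for _ in range(5)]
    labels = [f"img {i}" for i in range(5)]
    build_heat_map_square(img_list, "embed", sim_centroids, labels).show()
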
def build_heat_map_rect(
    img_list_rows,
    img_list_cols,
    label_list_rows,
    label_list_cols,
    embed_field,
    sim_fun,
    center=False,
    temperature=5,
    hm_scale=20,
):
    """Rows-vs-columns heat map with a per-row softmax over similarities.

    `temperature` multiplies the similarities before the softmax, so larger
    values sharpen each row's distribution. With `center=True`, the column
    average is subtracted from every row and appended as an "AVERAGE" row.
    """
    sim_mat = np.zeros((len(img_list_rows), len(img_list_cols)))
    for i, dct_i in enumerate(img_list_rows):
        for j, dct_j in enumerate(img_list_cols):
            sim_mat[i, j] = sim_fun(dct_i[embed_field], dct_j[embed_field])

    # Per-row softmax over the columns.
    sim_mat_exp = np.exp(temperature * sim_mat)
    sim_mat_exp /= sim_mat_exp.sum(axis=1, keepdims=True)
    if center:
        sim_mat_exp_avg = sim_mat_exp.mean(axis=0, keepdims=True)
        sim_mat_exp -= sim_mat_exp_avg
        # Rescale the average row so it stays visible on the shared color scale.
        sim_mat_exp_avg = sim_mat_exp_avg * sim_mat_exp.max() / sim_mat_exp_avg.max()

    # Sort rows by how peaked their softmax distribution is.
    sim_mat_norm = np.sum(sim_mat_exp * sim_mat_exp, axis=1)
    row_order = np.argsort(sim_mat_norm, axis=-1)
    row_labels_sorted = [label_list_rows[i] for i in row_order]
    if center:
        col_order = np.argsort(sim_mat_exp_avg.sum(axis=0), axis=-1)
    else:
        # Cluster columns by the similarity of their normalized column profiles.
        sim_mat_exp_norm = normalize(sim_mat_exp, axis=0, norm="l2")
        cluster_cols_sim_mat = np.matmul(
            sim_mat_exp_norm.transpose(), sim_mat_exp_norm
        )
        col_order = get_cluster_order(cluster_cols_sim_mat)
    col_labels_sorted = [label_list_cols[i] for i in col_order]

    if center:
        # Append a blank spacer row and the rescaled average row at the bottom.
        row_order = list(row_order) + [len(row_order), len(row_order) + 1]
        row_labels_sorted = row_labels_sorted + ["_", "AVERAGE"]
        sim_mat_exp = np.concatenate(
            [sim_mat_exp, np.zeros_like(sim_mat_exp_avg), sim_mat_exp_avg], axis=0
        )
    sim_mat_exp_sorted = sim_mat_exp[np.ix_(row_order, col_order)]
    return make_heat_map(
        sim_mat_exp_sorted, col_labels_sorted, row_labels_sorted, scale=hm_scale
    )

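# Sketch: the rectangular rows-vs-columns variant on synthetic records, with
# center=True to exercise the appended "AVERAGE" row. All names and data here
# are illustrative assumptions.
def _demo_heat_map_rect():
    rng = np.random.RandomState(0)
    rows = [{"embed": normalize(rng.randn(3, 8), norm="l2")} for _ in range(4)]
    cols = [{"embed": normalize(rng.randn(3, 8), norm="l2")} for _ in range(6)]
    fig = build_heat_map_rect(
        rows,
        cols,
        [f"row {i}" for i in range(4)],
        [f"col {j}" for j in range(6)],
        "embed",
        sim_pairwise_avg,
        center=True,
    )
    fig.show()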