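"""Gradio demo: re-rank image captions using visual semantic information.

Each candidate caption (greedy, best beam-5, and visually re-ranked) is given
a fluency score by a causal language model and a similarity score against a
visual context label; the two signals are fused into a single re-ranking score.
"""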
import os

import gradio as gr
import numpy as np
import torch
from torch.nn.functional import softmax

from sentence_transformers import SentenceTransformer, util
from transformers import AutoTokenizer, AutoModelForCausalLM

# Sentence-embedding model used to measure caption / visual-context similarity.
model_sts = SentenceTransformer('roberta-large-nli-stsb-mean-tokens')

def get_sim(x):
    # Collapse a 1x1 cosine-similarity matrix to a plain Python float.
    return float(np.squeeze(x))

# Meta's Llama weights are gated; authenticate with a Hugging Face token
# stored in the HF_token environment variable (e.g. a Space secret).
access_token = os.getenv("HF_token")

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B", token=access_token)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", token=access_token)
model.eval()

def sentence_prob_mean(text):
    """Return the mean probability the LM assigns to each next token of `text`."""
    input_ids = tokenizer.encode(text, return_tensors='pt')

    with torch.no_grad():
        outputs = model(input_ids, labels=input_ids)
        logits = outputs.logits

    # Shift so that position i of the logits predicts token i+1 of the input.
    shift_logits = logits[..., :-1, :].contiguous()
    shift_labels = input_ids[..., 1:].contiguous()

    # Probability the model assigned to each actual next token.
    probs = softmax(shift_logits, dim=-1)
    gathered_probs = torch.gather(probs, 2, shift_labels.unsqueeze(-1)).squeeze(-1)

    return torch.mean(gathered_probs).item()
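
# Illustrative usage: sentence_prob_mean("a baby sitting in front of a cake")
# returns a value in (0, 1); higher means the LM finds the caption more fluent.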


def cos_sim(a, b):
    # Cosine similarity for plain numpy vectors (numpy counterpart of
    # util.pytorch_cos_sim).
    return np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


def Visual_re_ranker(caption_G, caption_B, caption_VR, visual_context_label, visual_context_prob):
    """Score three candidate captions against the visual context and return the fused scores."""
    # Embed each caption and the visual context label.
    caption_emb_G = model_sts.encode(caption_G, convert_to_tensor=True)
    caption_emb_B = model_sts.encode(caption_B, convert_to_tensor=True)
    caption_emb_VR = model_sts.encode(caption_VR, convert_to_tensor=True)
    visual_context_label_emb = model_sts.encode(visual_context_label, convert_to_tensor=True)

    # Cosine similarity between each caption and the visual context label.
    sim_1 = get_sim(util.pytorch_cos_sim(caption_emb_G, visual_context_label_emb).cpu().numpy())
    sim_2 = get_sim(util.pytorch_cos_sim(caption_emb_B, visual_context_label_emb).cpu().numpy())
    sim_3 = get_sim(util.pytorch_cos_sim(caption_emb_VR, visual_context_label_emb).cpu().numpy())

    # Language-model fluency of each caption.
    LM_1 = sentence_prob_mean(caption_G)
    LM_2 = sentence_prob_mean(caption_B)
    LM_3 = sentence_prob_mean(caption_VR)

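    # Fuse fluency and visual evidence: each LM probability is raised to
    # ((1 - sim) / (1 + sim)) ** (1 - visual_context_prob); higher similarity
    # to the visual context shrinks the exponent, pulling the score toward 1.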
    score_1 = pow(float(LM_1), pow((1 - float(sim_1)) / (1 + float(sim_1)), 1 - float(visual_context_prob)))
    score_2 = pow(float(LM_2), pow((1 - float(sim_2)) / (1 + float(sim_2)), 1 - float(visual_context_prob)))
    score_3 = pow(float(LM_3), pow((1 - float(sim_3)) / (1 + float(sim_3)), 1 - float(visual_context_prob)))

    return {"Greedy": float(score_1), "Best-Beam-5": float(score_2), "Visual_re-Ranker": float(score_3)}


demo = gr.Interface(
    fn=Visual_re_ranker,
    description="Demo for Caption Re-ranker with Visual Semantic Information",
    # Five text inputs map positionally onto Visual_re_ranker's parameters.
    inputs=[
        gr.Textbox(label="Greedy caption", value="baby is eating in front of a birthday cake"),
        gr.Textbox(label="Best beam-5 caption", value="a baby sitting in front of a cake"),
        gr.Textbox(label="Visually re-ranked caption", value="a baby sitting in front of a birthday cake"),
        gr.Textbox(label="Visual context label", value="candle wax light"),
        gr.Textbox(label="Visual context probability", value="0.958"),
    ],
    outputs="label",
)

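# The function returns {caption variant: fused score}; Gradio's "label"
# output renders these as a ranked list of confidences.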
demo.launch() |