AhmedSSabir committed on
Commit
bf2635f
1 Parent(s): b623db4

Upload app.py

Files changed (1)
  1. app.py +160 -0
app.py ADDED
@@ -0,0 +1,160 @@
+ #!/usr/bin/env python3
+ import torch
+ import numpy as np
+ import gradio as gr
+
+ from sentence_transformers import SentenceTransformer, util
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
+
+ # Sentence-similarity model used to relate captions to the visual context
+ model_sts = SentenceTransformer('stsb-distilbert-base')
+
+
+ def Sort_Tuple(tup):
+     # Sort a list of (item, score) tuples by score, in descending order
+     tup.sort(key=lambda x: x[1])
+     return tup[::-1]
+
+
+ def softmax(x):
+     exps = np.exp(x)
+     return np.divide(exps, np.sum(exps))
+
+
+ def get_sim(x):
+     # Strip the brackets from the string form of a 1x1 similarity matrix,
+     # e.g. "[[0.6533]]" -> "0.6533"
+     x = str(x)[1:-1]
+     x = str(x)[1:-1]
+     return x
+
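+ # Illustrative note (editorial, not in the original file): get_sim is a
+ # string workaround for the 1x1 matrix that util.pytorch_cos_sim returns
+ # for a pair of single sentences, e.g.
+ # get_sim(np.array([[0.6533]])) == "0.6533",
+ # which Visual_re_ranker later casts back to float.
+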
+
+ # Load the pre-trained language model and its tokenizer
+ model = GPT2LMHeadModel.from_pretrained('distilgpt2', output_hidden_states=True, output_attentions=True)
+ model.eval()
+ tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
+
+
+ def cloze_prob(text):
+     # Probability of the final word of `text` given the preceding context
+     whole_text_encoding = tokenizer.encode(text)
+     # Parse out the stem of the whole sentence (i.e., the part leading up to,
+     # but not including, the critical word)
+     text_list = text.split()
+     stem = ' '.join(text_list[:-1])
+     stem_encoding = tokenizer.encode(stem)
+     # cw_encoding is the difference between whole_text_encoding and
+     # stem_encoding; note that the critical word may span several BPE tokens
+     cw_encoding = whole_text_encoding[len(stem_encoding):]
+     # Run the entire sentence through the model, then look back at what it
+     # predicted for each position from the end of the stem onward
+     tokens_tensor = torch.tensor([whole_text_encoding])
+
+     with torch.no_grad():
+         outputs = model(tokens_tensor)
+         predictions = outputs[0]
+
+     logprobs = []
+     # Starting at the stem, collect log-probabilities for each downstream
+     # position (the logits at position j predict the token at j + 1)
+     start = -1 - len(cw_encoding)
+     for j in range(start, -1, 1):
+         raw_output = predictions[-1][j].tolist()
+         logprobs.append(np.log(softmax(raw_output)))
+
+     # If the critical word is three tokens long, logprobs holds three
+     # vocabulary-sized rows; the log-probability of the i-th critical token
+     # is logprobs[i][token_id]
+     conditional_probs = []
+     for cw, prob in zip(cw_encoding, logprobs):
+         conditional_probs.append(prob[cw])
+     # The product of these probabilities is the probability of the critical
+     # word given the context before it
+     return np.exp(np.sum(conditional_probs))
+
+
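+ # Worked example (editorial; the numbers are made up for illustration):
+ # cloze_prob("a man sitting on a surfboard in the ocean") splits off the
+ # stem "a man sitting on a surfboard in the" and the critical word
+ # " ocean". If " ocean" is a single BPE token with conditional probability
+ # 0.21 under distilgpt2, the function returns ~0.21; a word that splits
+ # into several tokens returns the product of its per-token probabilities.
+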
+ def cos_sim(a, b):
+     # Cosine similarity between two vectors (kept for reference; the app
+     # itself uses util.pytorch_cos_sim below)
+     return np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
+
+
+ def Visual_re_ranker(caption_man, caption_woman, visual_context_label, visual_context_prob):
+     # Encode both captions and the visual-context label
+     caption_emb_man = model_sts.encode(caption_man, convert_to_tensor=True)
+     caption_emb_woman = model_sts.encode(caption_woman, convert_to_tensor=True)
+     visual_context_label_emb = model_sts.encode(visual_context_label, convert_to_tensor=True)
+
+     # Semantic similarity between each caption and the visual context
+     sim_m = util.pytorch_cos_sim(caption_emb_man, visual_context_label_emb)
+     sim_m = get_sim(sim_m.cpu().numpy())
+
+     sim_w = util.pytorch_cos_sim(caption_emb_woman, visual_context_label_emb)
+     sim_w = get_sim(sim_w.cpu().numpy())
+
+     # Initial language-model score for each caption
+     LM_man = cloze_prob(caption_man)
+     LM_woman = cloze_prob(caption_woman)
+
+     # Belief revision via visual context: the exponent shrinks toward 0
+     # (and the score toward 1) as caption-context similarity rises
+     score_man = pow(float(LM_man), pow((1 - float(sim_m)) / (1 + float(sim_m)), 1 - float(visual_context_prob)))
+     score_woman = pow(float(LM_woman), pow((1 - float(sim_w)) / (1 + float(sim_w)), 1 - float(visual_context_prob)))
+
+     return {"Man": float(score_man), "Woman": float(score_woman)}
+
+
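+ # Formula note (editorial): the scores above instantiate
+ #     score = LM ** (((1 - sim) / (1 + sim)) ** (1 - p_visual))
+ # where LM is the caption's cloze probability, sim its similarity to the
+ # visual context, and p_visual the detector confidence; as sim -> 1 the
+ # exponent -> 0 and the revised score -> 1.
+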
+ demo = gr.Interface(
+     fn=Visual_re_ranker,
+     description="Demo for Women Wearing Lipstick: Measuring the Bias Between Object and Its Related Gender",
+     inputs=[
+         gr.Textbox(value="a man sitting on a surfboard in the ocean", label="Caption (man)"),
+         gr.Textbox(value="a woman sitting on a surfboard in the ocean", label="Caption (woman)"),
+         gr.Textbox(value="paddle", label="Visual context label"),
+         gr.Textbox(value="0.5283", label="Visual context probability"),
+     ],
+     outputs="label",
+ )
+ demo.launch()
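+ # Usage sketch (editorial): the ranker can also be called directly, e.g.
+ # Visual_re_ranker("a man sitting on a surfboard in the ocean",
+ #                  "a woman sitting on a surfboard in the ocean",
+ #                  "paddle", "0.5283")
+ # returns a dict of belief-revised scores for the two captions, which the
+ # Gradio "label" output renders as per-class confidences.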