AhmedSSabir committed on
Commit
e844435
1 Parent(s): 5ea9bf1

Update app.py

Files changed (1)
  1. app.py +4 -44
app.py CHANGED
@@ -7,18 +7,12 @@ import os
 import gradio as gr
 import requests
 
+# just for the sake of this demo, we use cloze prob to initialize the hypothesis
+
 #url = "https://github.com/simonepri/lm-scorer/tree/master/lm_scorer/models"
 #resp = requests.get(url)
 
 from sentence_transformers import SentenceTransformer, util
-#from sentence_transformers import SentenceTransformer, util
-#from sklearn.metrics.pairwise import cosine_similarity
-#from lm_scorer.models.auto import AutoLMScorer as LMScorer
-#from sentence_transformers import SentenceTransformer, util
-#from sklearn.metrics.pairwise import cosine_similarity
-
-
-#model_sts = gr.Interface.load('huggingface/sentence-transformers/stsb-distilbert-base')
 
 model_sts = SentenceTransformer('stsb-distilbert-base')
 #model_sts = SentenceTransformer('roberta-large-nli-stsb-mean-tokens')
@@ -68,7 +62,6 @@ tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
 def cloze_prob(text):
 
     whole_text_encoding = tokenizer.encode(text)
-    # Parse out the stem of the whole sentence (i.e., the part leading up to but not including the critical word)
     text_list = text.split()
     stem = ' '.join(text_list[:-1])
     stem_encoding = tokenizer.encode(stem)
@@ -80,7 +73,6 @@ def cloze_prob(text):
     predictions = outputs[0]
 
     logprobs = []
-    # start at the stem and get downstream probabilities incrementally from the model(see above)
     start = -1-len(cw_encoding)
     for j in range(start,-1,1):
         raw_output = []
@@ -93,8 +85,7 @@ def cloze_prob(text):
     conditional_probs = []
     for cw,prob in zip(cw_encoding,logprobs):
        conditional_probs.append(prob[cw])
-    # now that you have all the relevant probabilities, return their product.
-    # This is the probability of the critical word given the context before it.
+
 
     return np.exp(np.sum(conditional_probs))
 
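For readers following the cloze_prob hunks above: the function scores the final ("critical") word of a sentence by multiplying the GPT-2 conditional probabilities of its sub-tokens given the stem. Below is a minimal sketch of that idea; the GPT2LMHeadModel load, the torch forward pass, and the log_softmax step are assumptions filled in from context (only the tokenizer line and the variable names are visible in this diff), and cloze_prob_sketch is a hypothetical name.

# Sketch only: P(final word | rest of sentence) under GPT-2.
# The model load, torch usage, and log_softmax step are assumed, not shown in the diff.
import torch
import numpy as np
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.eval()

def cloze_prob_sketch(text):
    # Encode the whole sentence and its stem (everything except the last word).
    whole_text_encoding = tokenizer.encode(text)
    stem = ' '.join(text.split()[:-1])
    stem_encoding = tokenizer.encode(stem)
    # Sub-tokens that belong to the critical (final) word.
    cw_encoding = whole_text_encoding[len(stem_encoding):]

    with torch.no_grad():
        outputs = model(torch.tensor([whole_text_encoding]))
    # Log-probabilities over the vocabulary at every position.
    logprobs = torch.log_softmax(outputs[0][0], dim=-1)

    # The prediction for the i-th critical-word token comes from the position
    # just before it (the last stem token plus i).
    start = len(whole_text_encoding) - len(cw_encoding) - 1
    conditional_probs = [logprobs[start + i, tok].item()
                         for i, tok in enumerate(cw_encoding)]
    # Product of the conditionals = probability of the critical word given the stem.
    return float(np.exp(np.sum(conditional_probs)))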
 
@@ -106,38 +97,7 @@ def cos_sim(a, b):
     return np.inner(a, b) / (np.linalg.norm(a) * (np.linalg.norm(b)))
 
 
-
-#def Visual_re_ranker(caption, visual_context_label, visual_context_prob):
-#def Visual_re_ranker(caption_man, caption_woman, visual_context_label, visual_context_prob):
-# caption_man = caption_man
-# caption_woman = caption_woman
-# visual_context_label= visual_context_label
-# visual_context_prob = visual_context_prob
-# caption_emb_man = model_sts.encode(caption_man, convert_to_tensor=True)
-# caption_emb_woman = model_sts.encode(caption_woman, convert_to_tensor=True)
-# visual_context_label_emb = model_sts.encode(visual_context_label, convert_to_tensor=True)
-
-# sim_m = cosine_scores = util.pytorch_cos_sim(caption_emb_man, visual_context_label_emb)
-# sim_m = sim_m.cpu().numpy()
-# sim_m = get_sim(sim_m)
-
-# sim_w = cosine_scores = util.pytorch_cos_sim(caption_emb_woman, visual_context_label_emb)
-# sim_w = sim_w.cpu().numpy()
-# sim_w = get_sim(sim_w)
-
-
-# LM_man = cloze_prob(caption_man)
-# LM_woman = cloze_prob(caption_woman)
-#LM = scorer.sentence_score(caption, reduce="mean")
-# score_man = pow(float(LM_man),pow((1-float(sim_m))/(1+ float(sim_m)),1-float(visual_context_prob)))
-# score_woman = pow(float(LM_woman),pow((1-float(sim_w))/(1+ float(sim_w)),1-float(visual_context_prob)))
-
-
-
 
-#return {"LM": float(LM)/1, "sim": float(sim)/1, "score": float(score)/1 }
-# return {"Man": float(score_man)/1, "Woman": float(score_woman)/1}
-#return LM, sim, score
 
 def Visual_re_ranker(caption_man, caption_woman, context_label, context_prob):
     caption_man = caption_man
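The commented-out block removed by this hunk documents the score that the surviving Visual_re_ranker computes: the caption's cloze (LM) probability raised to an exponent built from its Sentence-BERT similarity to the visual-context label and from the detector confidence visual_context_prob. A hedged sketch for a single caption is below; rerank_score is a hypothetical helper name, and model_sts, util, and cloze_prob come from the rest of app.py.

# Sketch of the removed scoring code for one caption (hypothetical helper name).
def rerank_score(caption, visual_context_label, visual_context_prob):
    # Sentence-BERT similarity between the caption and the visual-context label.
    cap_emb = model_sts.encode(caption, convert_to_tensor=True)
    ctx_emb = model_sts.encode(visual_context_label, convert_to_tensor=True)
    sim = util.pytorch_cos_sim(cap_emb, ctx_emb).item()
    # GPT-2 cloze probability of the caption (see cloze_prob above).
    LM = cloze_prob(caption)
    # score = LM ** (((1 - sim) / (1 + sim)) ** (1 - visual_context_prob))
    return pow(float(LM), pow((1 - float(sim)) / (1 + float(sim)),
                              1 - float(visual_context_prob)))

Visual_re_ranker would then return one such score for the man caption and one for the woman caption, which is what the demo's label output displays.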
@@ -178,7 +138,7 @@ demo = gr.Interface(
     description="Demo for Women Wearing Lipstick: Measuring the Bias Between Object and Its Related Gender",
     inputs=[gr.Textbox(value="a man riding a motorcycle on a road") , gr.Textbox(value="a woman riding a motorcycle on a road"), gr.Textbox(value="motor scooter"), gr.Textbox(value="0.2183")],
 
-    #inputs=[gr.Textbox(value="a man is blow drying his hair in the bathroom") , gr.Textbox(value="a woman is blow drying her hair in the bathroom"), gr.Textbox(value="hair spray"), gr.Textbox(value="0.7385")],
+
 
     outputs="label",
     )
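For completeness, the hunk header shows demo = gr.Interface(, so the interface updated here is presumably bound to Visual_re_ranker and launched roughly as below; the fn= binding and the demo.launch() call are assumptions, since only the description, inputs, and outputs arguments appear in the diff.

# Plausible wiring for the demo (fn= and launch() are assumed, not shown above).
import gradio as gr

demo = gr.Interface(
    fn=Visual_re_ranker,
    description="Demo for Women Wearing Lipstick: Measuring the Bias Between Object and Its Related Gender",
    inputs=[
        gr.Textbox(value="a man riding a motorcycle on a road"),    # caption_man
        gr.Textbox(value="a woman riding a motorcycle on a road"),  # caption_woman
        gr.Textbox(value="motor scooter"),                          # context_label
        gr.Textbox(value="0.2183"),                                 # context_prob
    ],
    outputs="label",
)

if __name__ == "__main__":
    demo.launch()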
 