AhmedSSabir committed
Commit: 30b25c5
Parent(s): be87a75
Update app.py

app.py CHANGED
@@ -7,7 +7,7 @@ import os
 import gradio as gr
 import requests
 import torch
-
+
 from torch.nn.functional import softmax
 import numpy as np
 
@@ -16,19 +16,11 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 
 from huggingface_hub import login
 
-# just for the sake of this demo, we use cloze prob to initialize the hypothesis
-
-#url = "https://github.com/simonepri/lm-scorer/tree/master/lm_scorer/models"
-#resp = requests.get(url)
 
 from sentence_transformers import SentenceTransformer, util
 
 model_sts = SentenceTransformer('stsb-distilbert-base')
-#model_sts = SentenceTransformer('roberta-large-nli-stsb-mean-tokens')
-#batch_size = 1
-#scorer = LMScorer.from_pretrained('gpt2' , device=device, batch_size=batch_size)
 
-#import torch
 from transformers import GPT2Tokenizer, GPT2LMHeadModel
 import numpy as np
 import re
@@ -40,21 +32,6 @@ def get_sim(x):
     x = str(x)[1:-1]
     return x
 
-
-# Load pre-trained model
-
-#model = GPT2LMHeadModel.from_pretrained('distilgpt2', output_hidden_states = True, output_attentions = True)
-#model = GPT2LMHeadModel.from_pretrained('gpt2', output_hidden_states = True, output_attentions = True)
-#model = gr.Interface.load('huggingface/distilgpt2', output_hidden_states = True, output_attentions = True)
-
-#model.eval()
-#tokenizer = gr.Interface.load('huggingface/distilgpt2')
-
-#tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
-#tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
-#tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
-
-
 import os
 #print(os.getenv('HF_token'))
 hf_api_token = os.getenv("HF_token") # For sensitive secrets
@@ -69,9 +46,6 @@ tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
 model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
 
 
-#tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
-#model = GPT2LMHeadModel.from_pretrained('gpt2')
-
 
 
 def sentence_prob_mean(text):
@@ -144,7 +118,7 @@ def Visual_re_ranker(caption_man, caption_woman, visual_context_label, context_p
 
 demo = gr.Interface(
     fn=Visual_re_ranker,
-    description="Demo for Women Wearing Lipstick: Measuring the Bias Between Object and Its Related Gender (distilbert)",
+    description="Demo for Women Wearing Lipstick: Measuring the Bias Between Object and Its Related Gender (LLAMA-3.2-1B with distilbert)",
     inputs=[gr.Textbox(value="a man riding a motorcycle on a road") , gr.Textbox(value="a woman riding a motorcycle on a road"), gr.Textbox(value="motor scooter"), gr.Textbox(value="0.2183")],
 
 
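The changed hunks reference sentence_prob_mean but do not show its body. Below is a minimal sketch of what such a scorer could look like with the Llama-3.2-1B checkpoint loaded in app.py, assuming it averages the per-token next-token probabilities of a caption; the gated checkpoint additionally needs the HF_token login set up earlier in the file.

# Hedged sketch only: the real sentence_prob_mean in app.py may differ.
import torch
from torch.nn.functional import softmax
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
model.eval()

def sentence_prob_mean(text):
    # One forward pass gives next-token logits for every position.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits          # (1, seq_len, vocab)
    # Logits at position i predict token i+1, so shift by one.
    shift_logits = logits[:, :-1, :]
    shift_labels = inputs["input_ids"][:, 1:]
    probs = softmax(shift_logits, dim=-1)
    # Probability the model assigned to each actual next token.
    token_probs = probs.gather(2, shift_labels.unsqueeze(-1)).squeeze(-1)
    # Mean token probability as a simple fluency score for the caption.
    return token_probs.mean().item()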
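Visual_re_ranker is likewise only named in the hunk headers. The sketch below shows one way the demo's re-ranking could combine each caption's LM score with the visual-context signal using the stsb-distilbert-base encoder from the diff; the helper name rerank_with_visual_context and the multiplicative combination are assumptions, not the app's confirmed formula.

# Assumption-labeled sketch; not the app's confirmed Visual_re_ranker logic.
from sentence_transformers import SentenceTransformer, util

model_sts = SentenceTransformer('stsb-distilbert-base')

def rerank_with_visual_context(caption, lm_score, visual_context_label, context_prob):
    # Semantic relatedness between the caption and the detected object label.
    emb = model_sts.encode([caption, visual_context_label], convert_to_tensor=True)
    sim = util.cos_sim(emb[0], emb[1]).item()
    # Assumed combination: weight the caption's LM score by the visual evidence.
    return lm_score * sim * float(context_prob)

# Usage mirroring the demo's default inputs: score the man and woman captions
# against the detected object "motor scooter" with probability 0.2183 and
# compare the two results.

Under this kind of combination, whichever caption scores higher would be the one the demo reports as better supported by the visual context.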