import torch
import datasets
import gradio
import pandas

from transformers import GPT2LMHeadModel, GPT2TokenizerFast
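
# Gradio demo: compare a stock GPT-2 model against a debiased GPT-2 checkpoint on
# CrowS-Pairs sentence pairs, reporting the relative perplexity gap between the
# more- and less-stereotypical sentence under each model.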


class CrowSPairsDataset(object):
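    """CrowS-Pairs (multilingual) test split, filtered to pairs annotated as 'stereo'."""
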
    def __init__(self):
        super().__init__()

        # Keep only pairs annotated "stereo" (sent_more expresses the stereotype)
        # and drop the direction column, which is then constant.
        self.df = (
            datasets.load_dataset("BigScienceBiasEval/crows_pairs_multilingual")["test"]
            .to_pandas()
            .query('stereo_antistereo == "stereo"')
            .drop(columns="stereo_antistereo")
        )

    def sample(self, bias_type, n=10):
        return self.df[self.df["bias_type"] == bias_type].sample(n=n)

    def bias_types(self):
        return self.df.bias_type.unique().tolist()


def run(df):
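    """Render an HTML table comparing both models on a dataframe of sentence pairs.

    Each sentence is scored with the stock GPT-2 model and the debiased model; the
    LM loss is the mean token negative log-likelihood, so exp(loss) is the sentence
    perplexity. Lower perplexity on the more-stereotypical sentence means the model
    prefers the stereotype.
    """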
    result = "<table><tr style='color: white; background-color: #555'><th>index</th><th>more stereotypical</th><th>gpt2</th><th>gpt2 (debiased)</th><th>less stereotypical</th></tr>"
    for i, row in df.iterrows():
        result += f"<tr><td>{i}</td><td style='padding: 0 1em; background-image: linear-gradient(90deg, rgba(0,255,255,0.2) 0%, rgba(255,255,255,1) 100%)'>{row['sent_more']}</td>"
        # Tokenize and score the more-stereotypical sentence with both models.
        more = row["sent_more"]
        more = tokenizer(more, return_tensors="pt")["input_ids"].to(device)
        with torch.no_grad():
            out_more_gpt = model_gpt(more, labels=more.clone())
            out_more_custom = model_custom(more, labels=more.clone())
        score_more_gpt = out_more_gpt["loss"]
        score_more_custom = out_more_custom["loss"]
        perplexity_more_gpt = torch.exp(score_more_gpt).item()
        perplexity_more_custom = torch.exp(score_more_custom).item()

        # Repeat for the less-stereotypical sentence.
        less = row["sent_less"]
        less = tokenizer(less, return_tensors="pt")["input_ids"].to(device)
        with torch.no_grad():
            out_less_gpt = model_gpt(less, labels=less.clone())
            out_less_custom = model_custom(less, labels=less.clone())
        score_less_gpt = out_less_gpt["loss"]
        score_less_custom = out_less_custom["loss"]
        perplexity_less_gpt = torch.exp(score_less_gpt).item()
        perplexity_less_custom = torch.exp(score_less_custom).item()

        # Shade the gpt2 cell by the relative perplexity gap: cyan when the model finds
        # the more-stereotypical sentence *less* likely, magenta when it prefers it.
        # `shade` maps a gap in [0, 1] to an opacity in [~0.17, 1] so small gaps stay visible.
        if perplexity_more_gpt > perplexity_less_gpt:
            diff = abs((perplexity_more_gpt - perplexity_less_gpt) / perplexity_more_gpt)
            shade = (diff + 0.2) / 1.2
            result += f"<td style='background-color: rgba(0,255,255,{shade})'>{diff:.2f}</td>"
        else:
            diff = abs((perplexity_less_gpt - perplexity_more_gpt) / perplexity_less_gpt)
            shade = (diff + 0.2) / 1.2
            result += f"<td style='background-color: rgba(255,0,255,{shade})'>{diff:.2f}</td>"

        # Same shading for the debiased model's cell.
        if perplexity_more_custom > perplexity_less_custom:
            diff = abs((perplexity_more_custom - perplexity_less_custom) / perplexity_more_custom)
            shade = (diff + 0.2) / 1.2
            result += f"<td style='background-color: rgba(0,255,255,{shade})'>{diff:.2f}</td>"
        else:
            diff = abs((perplexity_less_custom - perplexity_more_custom) / perplexity_less_custom)
            shade = (diff + 0.2) / 1.2
            result += f"<td style='background-color: rgba(255,0,255,{shade})'>{diff:.2f}</td>"

        result += f"<td style='padding: 0 1em; background-image: linear-gradient(90deg, rgba(255,255,255,1) 0%, rgba(255,0,255,0.2) 100%)'>{row['sent_less']}</td></tr>"
    result += "</table>"
    return result

def sample_and_run(bias_type):
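    # Draw a fresh sample of pairs for the selected bias type and score them.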
    sample = dataset.sample(bias_type)
    return run(sample)

def manual_run(more, less):
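    # Wrap a user-provided sentence pair in the dataframe schema run() expects.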
    df = pandas.DataFrame.from_dict({
            'sent_more': [more],
            'sent_less': [less],
            'bias_type': ["manual"],
        })
    return run(df)

# Use a GPU when available; the models are small enough to run on CPU as well.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the stock GPT-2 model and the debiased checkpoint; both are scored with the
# same GPT-2 tokenizer.
model_id = "gpt2"
model_gpt = GPT2LMHeadModel.from_pretrained(model_id).to(device)
model_custom = GPT2LMHeadModel.from_pretrained("iabhijith/GPT2-small-debiased").to(device)

tokenizer = GPT2TokenizerFast.from_pretrained(model_id)
dataset = CrowSPairsDataset()

# Static page copy (description, notes, results) lives in separate Markdown files.
with open("description.md") as fh:
    desc = fh.read()

with open("descr-2.md") as fh:
    desc2 = fh.read()

with open("notice.md") as fh:
    notice = fh.read()

with open("results.md") as fh:
    results = fh.read()
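
# UI: pick a bias type and sample pairs, or enter a custom pair; results render as
# an HTML table below the controls.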

with gradio.Blocks(title="Detecting stereotypes in the GPT-2 language model using CrowS-Pairs") as iface:
    gradio.Markdown(desc)
    with gradio.Row(equal_height=True):
        with gradio.Column(scale=4):
            bias_sel = gradio.Dropdown(label="Bias Type", choices=dataset.bias_types())
        with gradio.Column(scale=1):
            but = gradio.Button("Sample")
    gradio.Markdown(desc2)
    with gradio.Row(equal_height=True):
        with gradio.Column(scale=2):
            more = gradio.Textbox(label="More stereotypical")
        with gradio.Column(scale=2):
            less = gradio.Textbox(label="Less stereotypical")
        with gradio.Column(scale=1):
            manual = gradio.Button("Run")
    out = gradio.HTML()
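    # Both buttons render their HTML into the same output component.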
    but.click(sample_and_run, bias_sel, out)
    manual.click(manual_run, [more, less], out)

    with gradio.Accordion("Some more details"):
        gradio.Markdown(notice)
    with gradio.Accordion("Results for English and French BERT language models"):
        gradio.Markdown(results)

iface.launch()