emilylearning committed on
Commit bd64c46
1 Parent(s): a138a7b

first commit

Files changed (1)
  1. app.py +295 -0
app.py ADDED
@@ -0,0 +1,295 @@
import gradio as gr
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer
import pandas as pd
import numpy as np


# Play-with-me consts
CONDITIONING_VARIABLES = ["none", "birth_place", "birth_date", "name"]
FEMALE_WEIGHTS = [1.5, 5]  # About 5x more male than female tokens in the dataset

# Internal consts
START_YEAR = 1800
STOP_YEAR = 1999
SPLIT_KEY = "DATE"

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

MAX_TOKEN_LENGTH = 128
NON_LOSS_TOKEN_ID = -100
NON_GENDERED_TOKEN_ID = 30  # Picked ints that pop out visually
LABEL_DICT = {"female": 9, "male": -9}
CLASSES = list(LABEL_DICT.keys())


# Fire up the models
models_paths = dict()
models = dict()

base_path = "emilylearning/"
for var in CONDITIONING_VARIABLES:
    for f_weight in FEMALE_WEIGHTS:
        # Checkpoints for the two weightings follow slightly different naming schemes
        if f_weight == 1.5:
            models_paths[(var, f_weight)] = (
                base_path
                + f"finetuned_cgp_added_{var}__female_weight_{f_weight}__test_run_False__p_dataset_100"
            )
        else:
            models_paths[(var, f_weight)] = (
                base_path
                + f"finetuned_cgp_add_{var}__f_weight_{f_weight}__p_dataset_100__test_False"
            )
        models[(var, f_weight)] = AutoModelForTokenClassification.from_pretrained(
            models_paths[(var, f_weight)]
        )

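# For example, per the else branch above, models_paths[("birth_date", 5)]
# resolves to
# "emilylearning/finetuned_cgp_add_birth_date__f_weight_5__p_dataset_100__test_False".
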
# The tokenizer is the same for every model, so just grab one of them
tokenizer = AutoTokenizer.from_pretrained(
    models_paths[(CONDITIONING_VARIABLES[0], FEMALE_WEIGHTS[0])], add_prefix_space=True
)
MASK_TOKEN_ID = tokenizer.mask_token_id

# More static stuff
gendered_lists = [
    ["he", "she"],
    ["him", "her"],
    ["his", "hers"],
    ["male", "female"],
    ["man", "woman"],
    ["men", "women"],
    ["husband", "wife"],
]
# Map each gendered token to its [male, female] pair
male_gendered_dict = {pair[0]: pair for pair in gendered_lists}
female_gendered_dict = {pair[1]: pair for pair in gendered_lists}

male_gendered_token_ids = tokenizer.convert_tokens_to_ids(
    list(male_gendered_dict.keys())
)
female_gendered_token_ids = tokenizer.convert_tokens_to_ids(
    list(female_gendered_dict.keys())
)
# Make sure every gendered word is a single token in this tokenizer's vocab
assert tokenizer.unk_token_id not in male_gendered_token_ids
assert tokenizer.unk_token_id not in female_gendered_token_ids

label_list = list(LABEL_DICT.values())
assert label_list[0] == LABEL_DICT["female"], "LABEL_DICT not an ordered dict"

label2id = {label: idx for idx, label in enumerate(label_list)}

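# Note: with LABEL_DICT = {"female": 9, "male": -9}, label2id works out to
# {9: 0, -9: 1}, so prediction index 0 means "female" and 1 means "male" below.
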
# Prepare text
def tokenize_and_append_metadata(text, tokenizer):
    """Tokenize the text, mask out gendered tokens, and build matching labels."""
    tokenized = tokenizer(
        text,
        truncation=True,
        padding=True,
        max_length=MAX_TOKEN_LENGTH,
    )

    # Find the gendered tokens among the token ids
    token_ids = tokenized["input_ids"]
    female_tags = torch.tensor(
        [
            LABEL_DICT["female"]
            if id in female_gendered_token_ids
            else NON_GENDERED_TOKEN_ID
            for id in token_ids
        ]
    )
    male_tags = torch.tensor(
        [
            LABEL_DICT["male"]
            if id in male_gendered_token_ids
            else NON_GENDERED_TOKEN_ID
            for id in token_ids
        ]
    )

    # Label and mask out occurrences of gendered tokens
    labels = torch.where(
        female_tags == LABEL_DICT["female"],
        label2id[LABEL_DICT["female"]],
        NON_LOSS_TOKEN_ID,
    )
    labels = torch.where(
        male_tags == LABEL_DICT["male"], label2id[LABEL_DICT["male"]], labels
    )
    masked_token_ids = torch.where(
        female_tags == LABEL_DICT["female"], MASK_TOKEN_ID, torch.tensor(token_ids)
    )
    masked_token_ids = torch.where(
        male_tags == LABEL_DICT["male"], MASK_TOKEN_ID, masked_token_ids
    )

    tokenized["input_ids"] = masked_token_ids
    tokenized["labels"] = labels

    return tokenized

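# Illustrative example (not exercised by the app itself): for a sentence like
# "Born 1899, she was a nurse.", tokenize_and_append_metadata returns input_ids
# with the token for "she" replaced by the [MASK] id, and labels equal to
# NON_LOSS_TOKEN_ID everywhere except that position, which holds
# label2id[LABEL_DICT["female"]] == 0.
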
# Run inference
def predict_gender_pronouns(
    num_points, conditioning_variables, f_weights, input_text, return_preds=False
):
    """Sweep DATE over a range of years and record % female/male predictions per model."""
    text_portions = input_text.split(SPLIT_KEY)

    years = np.linspace(START_YEAR, STOP_YEAR, int(num_points)).astype(int)

    dfs = []
    dfs.append(pd.DataFrame({"year": years}))
    for f_weight in f_weights:
        for var in conditioning_variables:
            prefix = f"w{f_weight}_{var}"
            model = models[(var, f_weight)]

            p_female = []
            p_male = []
            for b_date in years:
                target_text = f"{b_date}".join(text_portions)
                tokenized_sample = tokenize_and_append_metadata(
                    target_text,
                    tokenizer=tokenizer,
                )

                ids = tokenized_sample["input_ids"]
                atten_mask = torch.tensor(tokenized_sample["attention_mask"])
                labels = tokenized_sample["labels"]

                with torch.no_grad():
                    outputs = model(ids.unsqueeze(dim=0), atten_mask.unsqueeze(dim=0))
                    preds = torch.argmax(outputs[0][0].cpu(), dim=1)

                # Only score the positions that were masked out
                was_masked = labels.cpu() != NON_LOSS_TOKEN_ID
                preds = torch.where(was_masked, preds, NON_LOSS_TOKEN_ID)
                num_preds = torch.sum(was_masked).item()

                p_female.append(len(torch.where(preds == 0)[0]) / num_preds * 100)
                p_male.append(len(torch.where(preds == 1)[0]) / num_preds * 100)

            dfs.append(pd.DataFrame({f"%f_{prefix}": p_female, f"%m_{prefix}": p_male}))

    results = pd.concat(dfs, axis=1).set_index("year")

    female_df = results.filter(regex=".*f_")
    female_df_for_plot = (
        female_df.reset_index()
    )  # Gradio timeseries requires the x-axis as a column

    male_df = results.filter(regex=".*m_")
    male_df_for_plot = (
        male_df.reset_index()
    )  # Gradio timeseries requires the x-axis as a column

    return (
        target_text,
        female_df_for_plot,
        female_df,
        male_df_for_plot,
        male_df,
    )

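# Illustrative sketch of a direct call, bypassing the Gradio UI below
# (the argument values here are arbitrary choices):
#
# _, female_plot, female_df, male_plot, male_df = predict_gender_pronouns(
#     num_points=5,
#     conditioning_variables=["none"],
#     f_weights=[5],
#     input_text="Born DATE, she was a computer scientist.",
# )
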
title = "Changing Gender Pronouns"
description = """
This is a demo for a project exploring possible spurious correlations in training datasets that can be exploited and manipulated to achieve alternative outcomes. In this case, we manipulate `DATE` to change the predicted gender pronouns, for both the BERT base model and models fine-tuned on a pronoun-prediction task using the [wiki-bio](https://huggingface.co/datasets/wiki_bio) dataset.

One way to explain this phenomenon is by looking at a likely data generating process for biography-like data, in both the main BERT training dataset and the `wiki_bio` dataset, in the form of a causal DAG.

In the DAG, we can see that `birth_place`, `birth_date` and `gender` are all independent elements that have no common cause with the other covariates in the DAG. However, `birth_place`, `birth_date` and `gender` may all have a role in causing one's `access_to_resources`, with the general trend that `access_to_resources` has become less gender-dependent over time, but not in every `birth_place`, with recent events in Afghanistan providing a stark counterexample to this trend. `access_to_resources` further determines how, or if at all, you may appear in the dataset's `context_words`.

We also argue that although there are complex causal interactions between the words in a text segment, the `context_words` are more likely to cause the `gender_pronouns` than vice versa. For example, if the subject is a famous doctor and the object is her wealthy father, these context words will determine which person is being referred to, and thus which gendered pronoun to use.

In this graph, any pink path between `context_words` and `gender_pronouns` will allow the flow of statistical correlation (regardless of the direction of the causal arrow), inviting confounding, and thus spurious correlations, into the trained model.

<center>
<img src="https://www.dropbox.com/s/x60r43h7uwztnru/generic_ds_dag.png?raw=1"
alt="DAG of possible data generating process for datasets used in training.">
</center>

Those familiar with causal DAGs may note that we could simply condition on `gender` to block any confounding between the `context_words` and the `gender_pronouns`. However, this is not always possible, particularly in generative or mask-filling tasks like those common in language models.

Here, we automatically mask (for prediction) the following tokens; they will also be automatically masked if you use them in the text below.
```
gendered_lists = [
    ['he', 'she'],
    ['him', 'her'],
    ['his', 'hers'],
    ['male', 'female'],
    ['man', 'woman'],
    ['men', 'women'],
    ['husband', 'wife'],
]
```

In this demo we are looking for a dose-response relationship between:
- our treatment: the text,
- and our outcome: the predicted gender of the pronouns in the text.

Specifically, we are checking whether a larger-magnitude intervention (an older `DATE` in the text) results in a larger-magnitude effect in the outcome (a higher percentage of predicted female pronouns).

In the demo below you can select among four different fine-tuning methods:
- which, if any, conditioning variable was appended to the text.

And between two different weighting schemes used in the loss function to nudge predictions toward the minority class in the dataset:
- female pronouns.
"""

article = (
    "Check out the [main colab notebook](https://colab.research.google.com/drive/14ce4KD6PrCIL60Eng-t79tEI1UP-DHGz?usp=sharing#scrollTo=Mg1tUeHLRLaG) "
    "with many more details about this method and implementation."
)

gr.Interface(
    fn=predict_gender_pronouns,
    inputs=[
        gr.inputs.Number(
            default=10,
            label="Number of points (years) plotted -- select fewer if slow.",
        ),
        gr.inputs.CheckboxGroup(
            CONDITIONING_VARIABLES,
            default=["none", "birth_date"],
            type="value",
            label="Pick model(s) that were trained with the following conditioning variables",
        ),
        gr.inputs.CheckboxGroup(
            FEMALE_WEIGHTS,
            default=[5],
            type="value",
            label="Pick model(s) that were trained with the following loss-function weight on female predictions",
        ),
        gr.inputs.Textbox(
            lines=7,
            label="Input text. Include one or more instances of the word 'DATE', to be replaced with a range of dates in the demo.",
            default="Born DATE, she was a computer scientist. Her work was greatly respected, and she was well-regarded in her field.",
        ),
    ],
    outputs=[
        gr.outputs.Textbox(type="auto", label="Sample target text fed to model"),
        gr.outputs.Timeseries(
            x="year",
            label="Percent predicted female pronouns vs. year, per model trained with conditioning and female-prediction loss weight",
        ),
        gr.outputs.Dataframe(
            overflow_row_behaviour="show_ends",
            label="Percent predicted female pronouns vs. year, per model trained with conditioning and female-prediction loss weight",
        ),
        gr.outputs.Timeseries(
            x="year",
            label="Percent predicted male pronouns vs. year, per model trained with conditioning and female-prediction loss weight",
        ),
        gr.outputs.Dataframe(
            overflow_row_behaviour="show_ends",
            label="Percent predicted male pronouns vs. year, per model trained with conditioning and female-prediction loss weight",
        ),
    ],
    title=title,
    description=description,
    article=article,
).launch()