.env DELETED
@@ -1,2 +0,0 @@
- # .env
- PASSWORD=88888888
 
.gitignore DELETED
@@ -1,3 +0,0 @@
- .gitignore
- .env
- test.py
 
.idea/.gitignore DELETED
@@ -1,3 +0,0 @@
- # Default ignored files
- /shelf/
- /workspace.xml
 
.idea/LLM-Open-Generation-Bias.iml DELETED
@@ -1,10 +0,0 @@
- <?xml version="1.0" encoding="UTF-8"?>
- <module type="PYTHON_MODULE" version="4">
-   <component name="NewModuleRootManager">
-     <content url="file://$MODULE_DIR$">
-       <excludeFolder url="file://$MODULE_DIR$/venv" />
-     </content>
-     <orderEntry type="inheritedJdk" />
-     <orderEntry type="sourceFolder" forTests="false" />
-   </component>
- </module>
 
.idea/inspectionProfiles/profiles_settings.xml DELETED
@@ -1,6 +0,0 @@
- <component name="InspectionProjectProfileManager">
-   <settings>
-     <option name="USE_PROJECT_PROFILE" value="false" />
-     <version value="1.0" />
-   </settings>
- </component>
 
.idea/misc.xml DELETED
@@ -1,4 +0,0 @@
- <?xml version="1.0" encoding="UTF-8"?>
- <project version="4">
-   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (LLM-Open-Generation-Bias)" project-jdk-type="Python SDK" />
- </project>
 
.idea/modules.xml DELETED
@@ -1,8 +0,0 @@
- <?xml version="1.0" encoding="UTF-8"?>
- <project version="4">
-   <component name="ProjectModuleManager">
-     <modules>
-       <module fileurl="file://$PROJECT_DIR$/.idea/LLM-Open-Generation-Bias.iml" filepath="$PROJECT_DIR$/.idea/LLM-Open-Generation-Bias.iml" />
-     </modules>
-   </component>
- </project>
 
.idea/vcs.xml DELETED
@@ -1,6 +0,0 @@
- <?xml version="1.0" encoding="UTF-8"?>
- <project version="4">
-   <component name="VcsDirectoryMappings">
-     <mapping directory="$PROJECT_DIR$" vcs="Git" />
-   </component>
- </project>
 
new CHANGED
@@ -1,30 +1 @@
-             newfile_nameerror = "Please enter a valid file name";
-         }
-         else {
-             $newfile_nameerror = "";
-         }
-     }
-     else {
-         $newfile_nameerror = "";
-     }
-
-     if (isset($_POST['newfile_content'])) {
-         $newfile_content = $_POST['newfile_content'];
-         if (empty($newfile_content)) {
-             $newfile_contenterror = "Please enter a valid file content";
-         }
-         else {
-             $newfile_contenterror = "";
-         }
-     }
-     else {
-         $newfile_contenterror = "";
-     }
-
-     if ($newfile_nameerror == "" && $newfile_contenterror == "") {
-         $newfile = fopen($newfile_name, "w");
-         fwrite($newfile, $newfile_content);
-         fclose($newfile);
-         header("Location: index.php");
-     }
- }
+ newfile_name
 
pages/2_new_Demo_1.py DELETED
@@ -1,217 +0,0 @@
- import streamlit as st
- import pandas as pd
- from datasets import load_dataset, Dataset
- from random import sample
- from utils.metric import Regard
- from utils.model import gpt2
- import matplotlib.pyplot as plt
- import os
-
- # Set up the Streamlit interface
- st.title('Gender Bias Analysis in Text Generation')
-
-
- def check_password():
-     def password_entered():
-         if password_input == os.getenv('PASSWORD'):
-         # if password_input == " ":
-             st.session_state['password_correct'] = True
-         else:
-             st.error("Incorrect Password, please try again.")
-
-     password_input = st.text_input("Enter Password:", type="password")
-     submit_button = st.button("Submit", on_click=password_entered)
-
-     if submit_button and not st.session_state.get('password_correct', False):
-         st.error("Please enter a valid password to access the demo.")
-
-
- if not st.session_state.get('password_correct', False):
-     check_password()
- else:
-     st.sidebar.success("Password Verified. Proceed with the demo.")
-
-     if 'data_size' not in st.session_state:
-         st.session_state['data_size'] = 10
-     if 'bold' not in st.session_state:
-         bold = pd.DataFrame({})
-         bold_raw = pd.DataFrame(load_dataset("AlexaAI/bold", split="train"))
-         for index, row in bold_raw.iterrows():
-             bold_raw_prompts = list(row['prompts'])
-             bold_raw_wikipedia = list(row['wikipedia'])
-             bold_expansion = zip(bold_raw_prompts, bold_raw_wikipedia)
-             for bold_prompt, bold_wikipedia in bold_expansion:
-                 bold = bold._append(
-                     {'domain': row['domain'], 'name': row['name'], 'category': row['category'], 'prompts': bold_prompt,
-                      'wikipedia': bold_wikipedia}, ignore_index=True)
-         st.session_state['bold'] = Dataset.from_pandas(bold)
-     if 'female_bold' not in st.session_state:
-         st.session_state['female_bold'] = []
-     if 'male_bold' not in st.session_state:
-         st.session_state['male_bold'] = []
-
-     st.subheader('Step 1: Set Data Size')
-     data_size = st.slider('Select number of samples per category:', min_value=1, max_value=50,
-                           value=st.session_state['data_size'])
-     st.session_state['data_size'] = data_size
-
-     if st.button('Show Data'):
-         st.session_state['female_bold'] = sample(
-             [p for p in st.session_state['bold'] if p['category'] == 'American_actresses'], data_size)
-         st.session_state['male_bold'] = sample(
-             [p for p in st.session_state['bold'] if p['category'] == 'American_actors'], data_size)
-
-         st.write(f'Sampled {data_size} female and male American actors.')
-         st.write('**Female Samples:**', pd.DataFrame(st.session_state['female_bold']))
-         st.write('**Male Samples:**', pd.DataFrame(st.session_state['male_bold']))
-
-     if st.session_state['female_bold'] and st.session_state['male_bold']:
-         st.subheader('Step 2: Generate Text')
-
-         if st.button('Generate Text'):
-             GPT2 = gpt2()
-             st.session_state['male_prompts'] = [p['prompts'] for p in st.session_state['male_bold']]
-             st.session_state['female_prompts'] = [p['prompts'] for p in st.session_state['female_bold']]
-             st.session_state['male_wiki_continuation'] = [p['wikipedia'].replace(p['prompts'], '') for p in
-                                                           st.session_state['male_bold']]
-             st.session_state['female_wiki_continuation'] = [p['wikipedia'].replace(p['prompts'], '') for p in
-                                                             st.session_state['female_bold']]
-
-             progress_bar = st.progress(0)
-
-             st.write('Generating text for male prompts...')
-             male_generation = GPT2.text_generation(st.session_state['male_prompts'], pad_token_id=50256, max_length=50,
-                                                    do_sample=False, truncation=True)
-             st.session_state['male_continuations'] = [gen[0]['generated_text'].replace(prompt, '') for gen, prompt in
-                                                       zip(male_generation, st.session_state['male_prompts'])]
-
-             progress_bar.progress(50)
-
-             st.write('Generating text for female prompts...')
-             female_generation = GPT2.text_generation(st.session_state['female_prompts'], pad_token_id=50256,
-                                                      max_length=50, do_sample=False, truncation=True)
-             st.session_state['female_continuations'] = [gen[0]['generated_text'].replace(prompt, '') for gen, prompt in
-                                                         zip(female_generation, st.session_state['female_prompts'])]
-
-             progress_bar.progress(100)
-             st.write('Text generation completed.')
-
-     if st.session_state.get('male_continuations') and st.session_state.get('female_continuations'):
-         st.subheader('Step 3: Sample Generated Texts')
-
-         st.write("Male Data Samples:")
-         samples_df = pd.DataFrame({
-             'Male Prompt': st.session_state['male_prompts'],
-             'Male Continuation': st.session_state['male_continuations'],
-             'Male Wiki Continuation': st.session_state['male_wiki_continuation'],
-         })
-         st.write(samples_df)
-
-         st.write("Female Data Samples:")
-         samples_df = pd.DataFrame({
-             'Female Prompt': st.session_state['female_prompts'],
-             'Female Continuation': st.session_state['female_continuations'],
-             'Female Wiki Continuation': st.session_state['female_wiki_continuation'],
-         })
-         st.write(samples_df)
-
-         if st.button('Evaluate'):
-             st.subheader('Step 4: Regard Results')
-             regard = Regard("inner_compare")
-             st.write('Computing regard results to compare male and female continuations...')
-
-             with st.spinner('Computing regard results...'):
-                 regard_male_results = regard.compute(data=st.session_state['male_continuations'],
-                                                      references=st.session_state['male_wiki_continuation'])
-                 st.write('**Raw Regard Results:**')
-                 st.json(regard_male_results)
-                 st.session_state['rmr'] = regard_male_results
-
-                 regard_female_results = regard.compute(data=st.session_state['female_continuations'],
-                                                        references=st.session_state['female_wiki_continuation'])
-                 st.write('**Average Regard Results:**')
-                 st.json(regard_female_results)
-                 st.session_state['rfr'] = regard_female_results
-
-         if st.button('Plot'):
-             st.subheader('Step 5: Regard Results Plotting')
-             categories = ['GPT2', 'Wiki']
-
-             mp_gpt = st.session_state['rmr']['no_ref_diff_mean']['positive']
-             mn_gpt = st.session_state['rmr']['no_ref_diff_mean']['negative']
-             mo_gpt = 1 - (mp_gpt + mn_gpt)
-
-             mp_wiki = mp_gpt - st.session_state['rmr']['ref_diff_mean']['positive']
-             mn_wiki = mn_gpt - st.session_state['rmr']['ref_diff_mean']['negative']
-             mo_wiki = 1 - (mn_wiki + mp_wiki)
-
-             fp_gpt = st.session_state['rfr']['no_ref_diff_mean']['positive']
-             fn_gpt = st.session_state['rfr']['no_ref_diff_mean']['negative']
-             fo_gpt = 1 - (fp_gpt + fn_gpt)
-
-             fp_wiki = fp_gpt - st.session_state['rfr']['ref_diff_mean']['positive']
-             fn_wiki = fn_gpt - st.session_state['rfr']['ref_diff_mean']['negative']
-             fo_wiki = 1 - (fn_wiki + fp_wiki)
-
-             positive_m = [mp_gpt, mp_wiki]
-             other_m = [mo_gpt, mo_wiki]
-             negative_m = [mn_gpt, mn_wiki]
-
-             positive_f = [fp_gpt, fp_wiki]
-             other_f = [fo_gpt, fo_wiki]
-             negative_f = [fn_gpt, fn_wiki]
-
-             # Plotting
-             fig_a, ax_a = plt.subplots()
-             ax_a.bar(categories, negative_m, label='Negative', color='blue')
-             ax_a.bar(categories, other_m, bottom=negative_m, label='Other', color='orange')
-             ax_a.bar(categories, positive_m, bottom=[negative_m[i] + other_m[i] for i in range(len(negative_m))],
-                      label='Positive', color='green')
-
-             plt.xlabel('Categories')
-             plt.ylabel('Proportion')
-             plt.title('GPT vs Wiki on male regard')
-             plt.legend()
-
-             st.pyplot(fig_a)
-
-             fig_b, ax_b = plt.subplots()
-             ax_b.bar(categories, negative_f, label='Negative', color='blue')
-             ax_b.bar(categories, other_f, bottom=negative_f, label='Other', color='orange')
-             ax_b.bar(categories, positive_f, bottom=[negative_f[i] + other_f[i] for i in range(len(negative_f))],
-                      label='Positive', color='green')
-
-             plt.xlabel('Categories')
-             plt.ylabel('Proportion')
-             plt.title('GPT vs Wiki on female regard')
-             plt.legend()
-             st.pyplot(fig_b)
-
-             m_increase = mp_gpt - mn_gpt
-             m_relative_increase = mp_gpt - mp_wiki - (mn_gpt - mn_wiki)
-             f_increase = fp_gpt - fn_gpt
-             f_relative_increase = fp_gpt - fp_wiki - (fn_gpt - fn_wiki)
-
-             absolute_difference = [m_increase, f_increase]
-             relative_difference = [m_relative_increase, f_relative_increase]
-
-             new_categories = ['Male', 'Female']
-
-             fig_c, ax_c = plt.subplots()
-             ax_c.bar(new_categories, absolute_difference, label='Positive - Negative', color='#40E0D0')
-
-             plt.xlabel('Categories')
-             plt.ylabel('Proportion')
-             plt.title('Difference of positive and negative: Male vs Female')
-             plt.legend()
-             st.pyplot(fig_c)
-
-             fig_d, ax_d = plt.subplots()
-             ax_d.bar(new_categories, relative_difference, label='Positive - Negative', color='#40E0D0')
-
-             plt.xlabel('Categories')
-             plt.ylabel('Proportion')
-             plt.title('Difference of positive and negative (relative to Wiki): Male vs Female')
-             plt.legend()
-             st.pyplot(fig_d)
-
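Note: the Step 5 arithmetic in the deleted page is easy to lose in the Streamlit plumbing. A minimal standalone sketch of the same derivation, with hypothetical numbers standing in for real regard results (the dict shape mirrors the `inner_compare` output this page consumed; `rmr` stands in for `st.session_state['rmr']`):

    # Hypothetical regard results shaped like the old inner_compare output.
    rmr = {
        "no_ref_diff_mean": {"positive": 0.40, "negative": 0.25},
        "ref_diff_mean": {"positive": 0.05, "negative": -0.10},
    }

    mp_gpt = rmr["no_ref_diff_mean"]["positive"]  # mean positive regard of GPT-2 text
    mn_gpt = rmr["no_ref_diff_mean"]["negative"]  # mean negative regard of GPT-2 text
    mo_gpt = 1 - (mp_gpt + mn_gpt)                # leftover mass plotted as "other"

    # Wiki baseline: subtract the mean (GPT-2 minus Wiki) difference back out.
    mp_wiki = mp_gpt - rmr["ref_diff_mean"]["positive"]
    mn_wiki = mn_gpt - rmr["ref_diff_mean"]["negative"]
    mo_wiki = 1 - (mp_wiki + mn_wiki)

    print(mp_gpt, mn_gpt, mo_gpt)     # roughly 0.40 0.25 0.35
    print(mp_wiki, mn_wiki, mo_wiki)  # roughly 0.35 0.35 0.30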
 
requirements.txt CHANGED
@@ -1,4 +1,3 @@
  openai
  transformers
- torch==2.0.1
- matplotlib
+ torch==2.0.1
 
utils/__pycache__/__init__.cpython-311.pyc DELETED
Binary file (197 Bytes)
 
utils/__pycache__/metric.cpython-311.pyc DELETED
Binary file (5.82 kB)
 
utils/__pycache__/model.cpython-311.pyc DELETED
Binary file (1.05 kB)
 
utils/metric.py CHANGED
@@ -43,27 +43,13 @@ class Regard:
                  return {"average_data_regard": pred_mean, "average_references_regard": ref_mean}
              else:
                  return {"regard_difference": {key: pred_mean[key] - ref_mean.get(key, 0) for key in pred_mean}}
-         elif self.config_name == "inner_compare":
+         else:
              pred_scores, pred_regard = self.regard(data)
-             ref_scores, ref_regard = self.regard(references)
-
-             postive_pred_regard = pred_regard['positive']
-             positive_ref_regard = ref_regard['positive']
-             postive_diff_regard = list(range(len(postive_pred_regard)))
-             for score_index in range(len(postive_pred_regard)):
-                 postive_diff_regard[score_index] = postive_pred_regard[score_index] - positive_ref_regard[score_index]
-
-             negative_pred_regard = pred_regard['negative']
-             negative_ref_regard = ref_regard['negative']
-             negative_diff_regard = list(range(len(negative_pred_regard)))
-             for score_index in range(len(negative_pred_regard)):
-                 negative_diff_regard[score_index] = negative_pred_regard[score_index] - negative_ref_regard[score_index]
-
-             ref_diff_regard = {'positive': postive_diff_regard, 'negative': negative_diff_regard}
-             ref_diff_mean = {k: mean(v) for k, v in ref_diff_regard.items()}
-             no_ref_diff_regard = {'positive': postive_pred_regard, 'negative': negative_pred_regard}
-             no_ref_diff_mean = {k: mean(v) for k, v in no_ref_diff_regard.items()}
-
-             return {"ref_diff_mean": ref_diff_mean,
-                     'no_ref_diff_mean': no_ref_diff_mean}
-
+             pred_mean = {k: mean(v) for k, v in pred_regard.items()}
+             pred_max = {k: max(v) for k, v in pred_regard.items()}
+             if aggregation == "maximum":
+                 return {"max_regard": pred_max}
+             elif aggregation == "average":
+                 return {"average_regard": pred_mean}
+             else:
+                 return {"regard": pred_scores}