mmahesh873 committed
Commit
dbd94bb
1 Parent(s): 9101fdb

pushing new comparison assessment

Files changed (2)
  1. app.py +218 -125
  2. app_old.py +237 -0
app.py CHANGED
@@ -5,6 +5,7 @@ import pandas as pd
import streamlit as st
import plotly.express as px
from config import other_info_dict
# %%
st.title("Microsoft Phi-2 LLM assessment")
# st.image('model_card.png', caption='Hugging face description', use_column_width=True)
@@ -31,52 +32,163 @@ prompt_options_dict = {
    'Prompt option 2 (Prompt option 2 with two shot prompting)': 'prompt_option_2.json',
    'Prompt option 3 (Prompt option 0 with minor fixes)': 'prompt_option_3.json'
}
- t_result_file = st.selectbox(
-     'Select the prompt:',
-     list(prompt_options_dict.keys()))
-
- result_file = prefix_post_processing + prompt_options_dict[t_result_file]
-
- prompt_option = int(prompt_options_dict[t_result_file].split('_')[-1].split('.')[0])
-
- with urllib.request.urlopen(result_file) as url:
-     data_dict = json.load(url)
-
- # File uploader
- with open(f'prompt_{prompt_option}.txt', "r") as file:
-     file_contents = file.read()
# st.write(file_contents)
- st.text_area("Prompt template:", value=file_contents, height=300)
  st.write("The answer to the question is obtained by post-processing the output of the LLM, wherein any additional content starting from the first 'Context: ' is disregarded.")
  st.write("In the case that the LLM answers <NO ANSWER>, the output is set to an empty string.")
# 'Context: ' + context + '\n\n' + 'Question: ' + t_question + '\n\n' + 'Answer:'
# %%
- overall_performance = round(data_dict["Overall performance"]*100, 2)
  st.header('Performance metric')
  st.write("""The performance metric used is an estimation of the percentage of correctly answered questions, i.e. the output of the model coincides with one of the ground truth answers. The performance metric can also be interpreted as the probability that the model correctly answers a question. The performance of the model is evaluated with the exact match accuracy metric (see compute_exact function in SQuAD2.0 official evaluation script [here](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/)), taking values in [0,1], where 0 is worst (model always wrong), and 1 is best (model always correct). It is the number of correctly answered questions divided by the number of data points. An answer is considered to be correctly answered (by the model), if the predicted answer after normalization (text is converted to lowercase, and punctuation, articles and extra whitespace are removed) matches exactly with any of the normalized ground truth answers. In the case of unanswerable questions, the empty string is considered to be the only ground truth answer.""")
with st.container():
-     st.write(f"**Overall performance: {overall_performance}%**")
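For reference, a minimal sketch of the SQuAD-style normalization and exact-match check described above (a simplified rendition of the cited compute_exact logic, not part of this commit):

```python
import re
import string

def normalize_answer(s: str) -> str:
    # Lowercase, drop punctuation and articles, collapse extra whitespace.
    s = s.lower()
    s = ''.join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r'\b(a|an|the)\b', ' ', s)
    return ' '.join(s.split())

def exact_match(prediction: str, ground_truths: list[str]) -> int:
    # Unanswerable questions carry the empty string as their only ground truth.
    return int(any(normalize_answer(prediction) == normalize_answer(gt) for gt in ground_truths))
```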
  # %%
st.header("Bias ratios")
st.write('Bias ratio is defined as the ratio of the lowest performance to the highest performance among categories that have sufficient data (with more than 50 data points) for a characteristic. The following table shows the bias ratio for each of the considered characteristics.')
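A minimal sketch of the bias-ratio definition given in the line above (hypothetical helper, not part of this commit; assumes per-category performance values and data counts are available):

```python
def bias_ratio(perf_by_category: dict[str, float], n_by_category: dict[str, int],
               min_points: int = 50) -> float:
    # Keep only categories with sufficient data, then take lowest/highest performance.
    perfs = [p for cat, p in perf_by_category.items() if n_by_category[cat] > min_points]
    return min(perfs) / max(perfs)
```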
- fairness_results = data_dict['Fairness results']
-
- characteristic_list = []
- fairness_ratio_list = []
- for key, val in fairness_results.items():
-     characteristic_list += [key]
-     fairness_ratio_list += [val['OverallFairness']]
-
- ch_df = pd.DataFrame({
-     'Characteristic': characteristic_list,
-     'Bias ratio': fairness_ratio_list
})
- st.dataframe(ch_df)
-
  # %%
@@ -92,39 +204,15 @@ st.write(f"""We evaluate the robustness of the LLM by assessing the variation in
# st.write(f"ProbTypos: {other_info_dict['ProbTypos_description']}")
# st.write(f"MaxTypo: {other_info_dict['MaxTypo_description']}")
- global_perturber_families = data_dict['Perturber Families']
- t_pert_fig = None
- perf_pert_values = []
- normalized_perf_pert_values = []
- family_levels = []
- family_names_list = []
- levels_index_list = []
- for item in global_perturber_families:
-     family_name = item['family name']
-     family_results = data_dict['Performance Robustness']['Perturber family wise results'][family_name]["PerformancePerturbers"]  # TODO: change the structure of post processing here
-     family_levels += item['levels']
-     original_perf = family_results[item['levels'][0]]
-     count = 0
-     for t_item in item['levels']:
-         perf_pert_values += [family_results[t_item]]
-         normalized_perf_pert_values += [family_results[t_item]/original_perf]
-         family_names_list += [family_name]
-         levels_index_list += [count]
-         count += 1
-
- t_pert_df_global = pd.DataFrame({
-     'Perturbation level': family_levels,
-     'Performance': perf_pert_values,
-     'normalized performance': normalized_perf_pert_values,
-     'Perturbation family': family_names_list,
-     'Levels': levels_index_list
- })
- t_pert_fig = px.line(t_pert_df_global, x="Levels", y="Performance", color='Perturbation family')
- t_pert_fig.update_xaxes(tickmode='linear', dtick=1)
- st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)
  # %%
@@ -149,55 +237,62 @@ if 'gender' in option:
if 'ethnicity' in option:
    st.write(other_info_dict['ethnicity_categories_text'])
- embedder_perf_ci_table = data_dict['Performance results'][option]['CI_Table']
- n_points = data_dict['n points']
- category_share_of_data = {}
- categories_list = []
- share_of_data_list = []
- n_points_list = []
- for key, val in embedder_perf_ci_table.items():
-     categories_list += [val['category']]
-     share_of_data_list += [val['Share of Data']]
-     n_points_list += [int(val['Share of Data']*n_points/100)]
- st.markdown("---")
- st.write("The following plot illustrates the distribution of data points across different categories.")
- t_df = pd.DataFrame({
-     'Category': categories_list,
-     'Share of data': share_of_data_list,
-     'Number of points': n_points_list
- })
fig = px.bar(t_df, x='Category', y='Number of points')
st.plotly_chart(fig, theme="streamlit", use_container_width=True)
st.markdown("---")
- st.write("The performance metric is shown together with 95% confidence intervals for each of the categories.")
-
- embedder_fair_ci_table = data_dict['Fairness results'][option]['CI_Table']
- categories_list = []
- estimates_list = []
- uppers_list = []
- lowers_list = []
- for key, val in embedder_fair_ci_table.items():
-     categories_list += [val['category']]
-     estimates_list += [val['Estimate']]
-     uppers_list += [val['Upper']]
-     lowers_list += [val['Lower']]
-
- t_fair_df = pd.DataFrame({
-     'Category': categories_list,
-     'Estimate': estimates_list,
-     'Upper': uppers_list,
-     'Lower': lowers_list
- })
- t_fair_df['Diff upper'] = t_fair_df['Upper'] - t_fair_df['Estimate']
- t_fair_df['Diff lower'] = t_fair_df['Estimate'] - t_fair_df['Lower']
-
- fig_fair = px.scatter(t_fair_df, x='Category', y='Estimate', error_y='Diff upper', error_y_minus='Diff lower')
fig_fair.update_layout(yaxis_title="Performance in %")
st.plotly_chart(fig_fair, theme="streamlit", use_container_width=True)
@@ -205,33 +300,31 @@ st.markdown("---")
  st.write('The following plots show the normalized average performance for each category of a characteristic, for each level of perturbation, starting with no perturbation. Each curve represents the normalized average performance on a category, by which we mean that we divide the average performance at every level of perturbation by the average performance without perturbation. ')
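A minimal sketch of the normalization described in the line above (assumes performance values are ordered by perturbation level, with level 0 unperturbed; not part of this commit):

```python
import pandas as pd

def normalize_by_unperturbed(perf_by_level: pd.Series) -> pd.Series:
    # Divide performance at every perturbation level by the level-0 (unperturbed) performance.
    return perf_by_level / perf_by_level.iloc[0]
```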
- t_result = data_dict['Performance Robustness']['Embedder wise results'][option]
- # Embedder categories
- for item in global_perturber_families:
-     family_name = item['family name']
-     dfs_list = []
-     count = 0
-     for t_item in item['levels']:
-         df = pd.DataFrame(t_result[t_item])
-         df['Perturber'] = t_item
-         df['Perturber family'] = family_name
-         df['Levels'] = count
-         dfs_list += [df]
-         count += 1
-     merged_df = pd.concat(dfs_list, axis=0)
-
-     temp_header = f'Perturber family: {family_name}'
-     # st.markdown(f'##### {temp_header}')
-     t_pert_fig = px.line(merged_df, x="Levels", y="normalized performance", color='category')
-     t_pert_fig.update_layout(yaxis_title="Normalized performance")
-
-     # px.line(t_pert_df_global, x="Levels", y="Performance", color='Perturbation family')
-     t_pert_df_global_temp = t_pert_df_global[t_pert_df_global['Perturbation family'] == family_name].copy(deep=True)
-     t_pert_df_global_temp['category'] = 'Overall'
-
-     t_pert_fig.add_trace(px.line(t_pert_df_global_temp, x="Levels", y="normalized performance", color='category').data[0])
-     t_pert_fig.update_xaxes(tickmode='linear', dtick=1)
-     st.write(f'The following plot illustrates the normalized performance of the model across different categories for the perturbation family: {family_name}.')
-     st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)
st.markdown("---")

import streamlit as st
import plotly.express as px
from config import other_info_dict
+ from utils import *
# %%
st.title("Microsoft Phi-2 LLM assessment")
# st.image('model_card.png', caption='Hugging face description', use_column_width=True)
    'Prompt option 2 (Prompt option 2 with two shot prompting)': 'prompt_option_2.json',
    'Prompt option 3 (Prompt option 0 with minor fixes)': 'prompt_option_3.json'
}
+ prompt_options_list = list(prompt_options_dict.keys())
+
+ options = st.multiselect(
+     'Select prompts:',
+     prompt_options_list,
+     [prompt_options_list[0]])
+
+ # st.write('Selected prompts:')
+ # for t_opt in options:
+ #     st.write(t_opt)
+ prompt_options_nums_list = []
+ st.markdown("---")
+ for t_opt in options:
+     st.write(t_opt)
+     prompt_option = int(prompt_options_dict[t_opt].split('_')[-1].split('.')[0])
+     with open(f'prompt_{prompt_option}.txt', "r") as file:
+         file_contents = file.read()
# st.write(file_contents)
+     st.text_area("Prompt template:", value=file_contents, height=100)
+     st.markdown("---")
+     prompt_options_nums_list += [prompt_option]
+
+ result_processor_obj_dict = {}
+ result_file_dict = {}
+ data_dicts_dict = {}
+ for t_result_file in options:
+     result_file = prefix_post_processing + prompt_options_dict[t_result_file]
+
+     prompt_option = int(prompt_options_dict[t_result_file].split('_')[-1].split('.')[0])
+
+     with urllib.request.urlopen(result_file) as url:
+         data_dict = json.load(url)
+
+     result_processor_obj_dict[t_result_file] = ResultsProcessor(
+         prompt_option=prompt_option,
+         result_file=result_file,
+         data_dict=data_dict
+     )
+     data_dicts_dict[t_result_file] = data_dict
+     result_file_dict[t_result_file] = result_file
+
+ print(result_processor_obj_dict)
+
+ # t_result_file = st.selectbox(
+ #     'Select the prompt:',
+ #     list(prompt_options_dict.keys()))
+
+ # result_file = prefix_post_processing + prompt_options_dict[t_result_file]
+
+ # prompt_option = int(prompt_options_dict[t_result_file].split('_')[-1].split('.')[0])
+
+ # with urllib.request.urlopen(result_file) as url:
+ #     data_dict = json.load(url)
+
+ # # File uploader
+ # with open(f'prompt_{prompt_option}.txt', "r") as file:
+ #     file_contents = file.read()
+ # # st.write(file_contents)
+ # st.text_area("Prompt template:", value=file_contents, height=300)
  st.write("The answer to the question is obtained by post-processing the output of the LLM, wherein any additional content starting from the first 'Context: ' is disregarded.")
  st.write("In the case that the LLM answers <NO ANSWER>, the output is set to an empty string.")
# 'Context: ' + context + '\n\n' + 'Question: ' + t_question + '\n\n' + 'Answer:'
# %%
st.header('Performance metric')
  st.write("""The performance metric used is an estimation of the percentage of correctly answered questions, i.e. the output of the model coincides with one of the ground truth answers. The performance metric can also be interpreted as the probability that the model correctly answers a question. The performance of the model is evaluated with the exact match accuracy metric (see compute_exact function in SQuAD2.0 official evaluation script [here](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/)), taking values in [0,1], where 0 is worst (model always wrong), and 1 is best (model always correct). It is the number of correctly answered questions divided by the number of data points. An answer is considered to be correctly answered (by the model), if the predicted answer after normalization (text is converted to lowercase, and punctuation, articles and extra whitespace are removed) matches exactly with any of the normalized ground truth answers. In the case of unanswerable questions, the empty string is considered to be the only ground truth answer.""")
with st.container():
+     overall_performance_list = []
+
+     for t_opt in options:
+         overall_performance_list += [result_processor_obj_dict[t_opt].get_overall_performance()]
+
+     t_perf_overall_df = pd.DataFrame({
+         'Prompt': options,
+         'Overall performance': overall_performance_list
+     })
+     st.dataframe(t_perf_overall_df.set_index(t_perf_overall_df.columns[0]))
+
+     perf_dict = t_perf_overall_df['Overall performance'].describe().round(2).to_dict()
+
+     if len(options) > 1:
+         st.subheader('Statistics of performance metrics across selected prompts')
+         st.write('Mean ', perf_dict['mean'])
+         st.write('Standard deviation: ', perf_dict['std'])
+         st.write('Minimum ', perf_dict['min'])
+         st.write('Maximum ', perf_dict['max'])
  # %%
st.header("Bias ratios")
st.write('Bias ratio is defined as the ratio of the lowest performance to the highest performance among categories that have sufficient data (with more than 50 data points) for a characteristic. The following table shows the bias ratio for each of the considered characteristics.')
+ processed_t_opt_dict = {}
+ ch_df = None
+ for t_opt in options:
+     processed_t_opt = t_opt.split('(')[0].strip()
+     processed_t_opt_dict[t_opt] = processed_t_opt
+     if ch_df is None:
+         ch_df = result_processor_obj_dict[t_opt].get_bias_ratios_df()
+         ch_df[processed_t_opt] = ch_df['Bias ratio'].values
+         # ch_df.rename(columns={'Bias ratio': t_opt})
+     else:
+         t_ch_df = result_processor_obj_dict[t_opt].get_bias_ratios_df()
+         assert (ch_df['Characteristic'].values == t_ch_df['Characteristic'].values).all()
+         ch_df[processed_t_opt] = t_ch_df['Bias ratio'].values
+
+ ch_df.drop(columns=['Bias ratio'], axis=1, inplace=True)
+
+ ch_df.set_index(ch_df.columns[0], inplace=True)
+ # ch_df = result_processor_obj_dict[t_result_file].get_bias_ratios_df()
+ with st.container():
+     st.dataframe(ch_df)  # Todo: MAX MIN HIGHLIGHT
+
+ means_list = []
+ stds_list = []
+ mins_list = []
+ maxs_list = []
+ for i in ch_df.index:
+     selected_columns = list(processed_t_opt_dict.keys())  # Example: Columns 'column1' and 'column3'
+     # Create statistics for the selected rows across selected columns
+     statistics = ch_df.loc[i].describe().loc[['mean', 'std', 'min', 'max']].to_dict()
+     means_list += [statistics['mean']]
+     stds_list += [statistics['std']]
+     mins_list += [statistics['min']]
+     maxs_list += [statistics['max']]
+
+ t_ch_df = pd.DataFrame({
+     'Mean': means_list,
+     'Standard deviation': stds_list,
+     'Minimum': mins_list,
+     'Maximum': maxs_list
})
+ t_ch_df.index = ch_df.index
+ if len(options) > 1:
+     with st.container():
+         st.dataframe(t_ch_df)
# %%
# st.write(f"ProbTypos: {other_info_dict['ProbTypos_description']}")
# st.write(f"MaxTypo: {other_info_dict['MaxTypo_description']}")
+ for t_opt in options:
+     st.write('Prompt used : ', t_opt)
+     t_pert_df_global = result_processor_obj_dict[t_opt].get_global_perturbers_df()
+     t_pert_fig = px.line(t_pert_df_global, x="Levels", y="Performance", color='Perturbation family')
+     t_pert_fig.update_xaxes(tickmode='linear', dtick=1)
+     st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)
  # %%
 
if 'ethnicity' in option:
    st.write(other_info_dict['ethnicity_categories_text'])
+ t_df = result_processor_obj_dict[options[0]].get_data_distribution(option)
+
fig = px.bar(t_df, x='Category', y='Number of points')
st.plotly_chart(fig, theme="streamlit", use_container_width=True)
st.markdown("---")
+ st.write("The performance metric for each category is shown across the selected prompts.")
+ count = 0
+ t_fair_dfs_list = []
+ for t_opt in options:
+     t_fair_df = result_processor_obj_dict[t_opt].get_fairness_confidence_interval_df(option)
+     t_fair_df['Prompt'] = processed_t_opt_dict[t_opt]
+     t_fair_dfs_list += [t_fair_df]
+     count += 1
+
+ merged_t_fair_df = pd.concat(t_fair_dfs_list, axis=0)
+
+ fig_fair = px.scatter(merged_t_fair_df, x='Category', y='Estimate', color='Prompt', symbol='Prompt')
+ # fig_fair = None
+ fig_fair.update_layout(yaxis_title="Performance in %")
+
+ st.plotly_chart(fig_fair, theme="streamlit", use_container_width=True)
+
+ st.markdown("---")
+ st.write("The performance metric is shown together with 95% confidence intervals for each category, across the selected prompts.")
+
+ temp_options = st.multiselect(
+     'Choose from your pre-selected prompts:',
+     options,
+     [options[0]])
+
+ t_fair_dfs_list_map = {}
+ count = 0
+ t_fair_dfs_list = []
+ for t_opt in temp_options:
+     t_fair_df = result_processor_obj_dict[t_opt].get_fairness_confidence_interval_df(option)
+     t_fair_df['Prompt'] = processed_t_opt_dict[t_opt]
+     t_fair_dfs_list_map[t_opt] = count
+     t_fair_dfs_list += [t_fair_df]
+     count += 1
+
+ merged_t_fair_df = pd.concat(t_fair_dfs_list, axis=0)
+
+ fig_fair = px.scatter(merged_t_fair_df, x='Category', y='Estimate', error_y='Diff upper', error_y_minus='Diff lower', color='Prompt', symbol='Prompt')
+ # fig_fair = None
  fig_fair.update_layout(yaxis_title="Performance in %")
  st.plotly_chart(fig_fair, theme="streamlit", use_container_width=True)
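The intervals plotted above come precomputed as 'Upper'/'Lower' values in the results JSON; the sketch below shows one common way a 95% interval for an accuracy estimate can be obtained (normal approximation, an assumption, not necessarily the method used by the assessment pipeline):

```python
import math

def accuracy_ci_95(n_correct: int, n_total: int) -> tuple[float, float]:
    # Normal-approximation interval for a proportion, returned in percent.
    p = n_correct / n_total
    half_width = 1.96 * math.sqrt(p * (1 - p) / n_total)
    return 100 * max(0.0, p - half_width), 100 * min(1.0, p + half_width)
```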
 
  st.write('The following plots show the normalized average performance for each category of a characteristic, for each level of perturbation, starting with no perturbation. Each curve represents the normalized average performance on a category, by which we mean that we divide the average performance at every level of perturbation by the average performance without perturbation. ')
+ temp_options_2 = st.multiselect(
+     'Choose from your pre-selected prompts:',
+     options,
+     [options[0]], key='Performance Robustness')
  st.markdown("---")
+ for t_opt in temp_options_2:
+     results_pert_rob_dict = result_processor_obj_dict[t_opt].get_performance_robustness(option)
+     merged_dfs_list = results_pert_rob_dict['merged_dfs_list']
+     t_pert_df_global_temps_list = results_pert_rob_dict['t_pert_df_global_temps_list']
+     family_names_list = results_pert_rob_dict['family_names_list']
+
+     for merged_df, t_pert_df_global_temp, family_name in zip(merged_dfs_list, t_pert_df_global_temps_list, family_names_list):
+         title_name = 'Perturbation family: ' + family_name + '\n\n Prompt : ' + t_opt
+         t_pert_fig = px.line(merged_df, x="Levels", y="normalized performance", color='category')
+         t_pert_fig.update_layout(yaxis_title="Normalized performance")
+
+         t_pert_fig.add_trace(px.line(t_pert_df_global_temp, x="Levels", y="normalized performance", color='category').data[0])
+         t_pert_fig.update_xaxes(tickmode='linear')
+         # t_pert_fig.update_layout(title=title_name)
+
+         # st.write(f'The following plot illustrates the normalized performance of the model across different categories for the perturbation family: {family_name}.')
+         st.write(title_name)
+         st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)
+         st.markdown("---")
app_old.py ADDED
@@ -0,0 +1,237 @@
+ # %%
+ # TODOS: Plots with plotly
+ import json
+ import pandas as pd
+ import streamlit as st
+ import plotly.express as px
+ from config import other_info_dict
+ # %%
+ st.title("Microsoft Phi-2 LLM assessment")
+ # st.image('model_card.png', caption='Hugging face description', use_column_width=True)
+ st.write("""
+ Microsoft Phi-2 (https://huggingface.co/microsoft/phi-2) is a Transformer model with 2.7 billion parameters. Performance on benchmarks for common sense, language understanding, and logical reasoning is nearly state-of-the-art among models with less than 13 billion parameters. Unlike typical Large Language Models (LLM), Phi-2 has not been fine-tuned through reinforcement learning from human feedback.""")
+
+ import urllib.request
+ import os
+ prefix_post_processing = os.environ["POST_PROCESSING_JSON"]
+
+ st.header('Evaluation dataset')
+ st.write(other_info_dict['data_description'])
+
+ # %%
+ st.header("Prompt")
+ st.write("For each data point in the evaluation dataset, we create a prompt for LLM by adding the context and the question to the below prompt template, while following the same structure of the prompt template.")
+
+ prompt_options_dict = {
+     'Prompt option 0 (with typos and grammatical errors, two shot prompting)': 'prompt_option_0.json',
+     'Prompt option 1 (Zero shot prompting)': 'prompt_option_1.json',
+     'Prompt option 2 (Prompt option 2 with two shot prompting)': 'prompt_option_2.json',
+     'Prompt option 3 (Prompt option 0 with minor fixes)': 'prompt_option_3.json'
+ }
+ t_result_file = st.selectbox(
+     'Select the prompt:',
+     list(prompt_options_dict.keys()))
+
+ result_file = prefix_post_processing + prompt_options_dict[t_result_file]
+
+ prompt_option = int(prompt_options_dict[t_result_file].split('_')[-1].split('.')[0])
+
+ with urllib.request.urlopen(result_file) as url:
+     data_dict = json.load(url)
+
+ # File uploader
+ with open(f'prompt_{prompt_option}.txt', "r") as file:
+     file_contents = file.read()
+ # st.write(file_contents)
+ st.text_area("Prompt template:", value=file_contents, height=300)
+ st.write("The answer to the question is obtained by post-processing the output of the LLM, wherein any additional content starting from the first 'Context: ' is disregarded.")
+ st.write("In the case that the LLM answers <NO ANSWER>, the output is set to an empty string.")
+
+ # 'Context: ' + context + '\n\n' + 'Question: ' + t_question + '\n\n' + 'Answer:'
+ # %%
+ overall_performance = round(data_dict["Overall performance"]*100, 2)
+
+ st.header('Performance metric')
+ st.write("""The performance metric used is an estimation of the percentage of correctly answered questions, i.e. the output of the model coincides with one of the ground truth answers. The performance metric can also be interpreted as the probability that the model correctly answers a question. The performance of the model is evaluated with the exact match accuracy metric (see compute_exact function in SQuAD2.0 official evaluation script [here](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/)), taking values in [0,1], where 0 is worst (model always wrong), and 1 is best (model always correct). It is the number of correctly answered questions divided by the number of data points. An answer is considered to be correctly answered (by the model), if the predicted answer after normalization (text is converted to lowercase, and punctuation, articles and extra whitespace are removed) matches exactly with any of the normalized ground truth answers. In the case of unanswerable questions, the empty string is considered to be the only ground truth answer.""")
+ with st.container():
+     st.write(f"**Overall performance: {overall_performance}%**")
+ # %%
+ st.header("Bias ratios")
+ st.write('Bias ratio is defined as the ratio of the lowest performance to the highest performance among categories that have sufficient data (with more than 50 data points) for a characteristic. The following table shows the bias ratio for each of the considered characteristics.')
+ fairness_results = data_dict['Fairness results']
+
+ characteristic_list = []
+ fairness_ratio_list = []
+ for key, val in fairness_results.items():
+     characteristic_list += [key]
+     fairness_ratio_list += [val['OverallFairness']]
+
+ ch_df = pd.DataFrame({
+     'Characteristic': characteristic_list,
+     'Bias ratio': fairness_ratio_list
+ })
+ st.dataframe(ch_df)
+
+ # %%
+ st.header("Robustness")
+
+ st.write(f"""We evaluate the robustness of the LLM by assessing the variation in performance when perturbations are introduced to the content outside of the prompt template. The following plot shows the performance across different levels of perturbation within a perturbation family that consists of a series of perturbation methods. We consider the following perturbation families.
+
+ - ProbTypos: {other_info_dict['ProbTypos_description']}
+
+ - MaxTypo: {other_info_dict['MaxTypo_description']}
+ """)
+
+ # st.write(f"ProbTypos: {other_info_dict['ProbTypos_description']}")
+ # st.write(f"MaxTypo: {other_info_dict['MaxTypo_description']}")
+
+ global_perturber_families = data_dict['Perturber Families']
+ t_pert_fig = None
+ perf_pert_values = []
+ normalized_perf_pert_values = []
+ family_levels = []
+ family_names_list = []
+ levels_index_list = []
+ for item in global_perturber_families:
+     family_name = item['family name']
+     family_results = data_dict['Performance Robustness']['Perturber family wise results'][family_name]["PerformancePerturbers"]  # TODO: change the structure of post processing here
+     family_levels += item['levels']
+     original_perf = family_results[item['levels'][0]]
+     count = 0
+     for t_item in item['levels']:
+         perf_pert_values += [family_results[t_item]]
+         normalized_perf_pert_values += [family_results[t_item]/original_perf]
+         family_names_list += [family_name]
+         levels_index_list += [count]
+         count += 1
+
+ t_pert_df_global = pd.DataFrame({
+     'Perturbation level': family_levels,
+     'Performance': perf_pert_values,
+     'normalized performance': normalized_perf_pert_values,
+     'Perturbation family': family_names_list,
+     'Levels': levels_index_list
+ })
+
+ t_pert_fig = px.line(t_pert_df_global, x="Levels", y="Performance", color='Perturbation family')
+ t_pert_fig.update_xaxes(tickmode='linear', dtick=1)
+
+ st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)
+
+ # %%
+ st.header("Characteristic results")
+
+ embedder_categories = data_dict['Embedder categories']
+
+ option = st.selectbox(
+     'Select characteristic:',
+     sorted(list(embedder_categories.keys())))
+
+ st.write('The following are the categories:')
+ st.write(', '.join(embedder_categories[option]))
+
+ if 'Length' in option:
+     st.write("Note: Here, length denotes the number of characters. ")
+
+ if 'gender' in option:
+     st.write(other_info_dict['gender_categories_text'])
+
+ if 'ethnicity' in option:
+     st.write(other_info_dict['ethnicity_categories_text'])
+
+ embedder_perf_ci_table = data_dict['Performance results'][option]['CI_Table']
+ n_points = data_dict['n points']
+ category_share_of_data = {}
+ categories_list = []
+ share_of_data_list = []
+ n_points_list = []
+ for key, val in embedder_perf_ci_table.items():
+     categories_list += [val['category']]
+     share_of_data_list += [val['Share of Data']]
+     n_points_list += [int(val['Share of Data']*n_points/100)]
+
+ st.markdown("---")
+ st.write("The following plot illustrates the distribution of data points across different categories.")
+ t_df = pd.DataFrame({
+     'Category': categories_list,
+     'Share of data': share_of_data_list,
+     'Number of points': n_points_list
+ })
+ fig = px.bar(t_df, x='Category', y='Number of points')
+
+ st.plotly_chart(fig, theme="streamlit", use_container_width=True)
+ st.markdown("---")
+
+ st.write("The performance metric is shown together with 95% confidence intervals for each of the categories.")
+
+ embedder_fair_ci_table = data_dict['Fairness results'][option]['CI_Table']
+ categories_list = []
+ estimates_list = []
+ uppers_list = []
+ lowers_list = []
+ for key, val in embedder_fair_ci_table.items():
+     categories_list += [val['category']]
+     estimates_list += [val['Estimate']]
+     uppers_list += [val['Upper']]
+     lowers_list += [val['Lower']]
+
+ t_fair_df = pd.DataFrame({
+     'Category': categories_list,
+     'Estimate': estimates_list,
+     'Upper': uppers_list,
+     'Lower': lowers_list
+ })
+
+ t_fair_df['Diff upper'] = t_fair_df['Upper'] - t_fair_df['Estimate']
+ t_fair_df['Diff lower'] = t_fair_df['Estimate'] - t_fair_df['Lower']
+
+ fig_fair = px.scatter(t_fair_df, x='Category', y='Estimate', error_y='Diff upper', error_y_minus='Diff lower')
+ fig_fair.update_layout(yaxis_title="Performance in %")
+
+ st.plotly_chart(fig_fair, theme="streamlit", use_container_width=True)
+ st.markdown("---")
+ st.write('The following plots show the normalized average performance for each category of a characteristic, for each level of perturbation, starting with no perturbation. Each curve represents the normalized average performance on a category, by which we mean that we divide the average performance at every level of perturbation by the average performance without perturbation. ')
+
+ t_result = data_dict['Performance Robustness']['Embedder wise results'][option]
+ # Embedder categories
+ for item in global_perturber_families:
+     family_name = item['family name']
+     dfs_list = []
+     count = 0
+     for t_item in item['levels']:
+         df = pd.DataFrame(t_result[t_item])
+         df['Perturber'] = t_item
+         df['Perturber family'] = family_name
+         df['Levels'] = count
+         dfs_list += [df]
+         count += 1
+     merged_df = pd.concat(dfs_list, axis=0)
+
+     temp_header = f'Perturber family: {family_name}'
+     # st.markdown(f'##### {temp_header}')
+     t_pert_fig = px.line(merged_df, x="Levels", y="normalized performance", color='category')
+     t_pert_fig.update_layout(yaxis_title="Normalized performance")
+
+     # px.line(t_pert_df_global, x="Levels", y="Performance", color='Perturbation family')
+     t_pert_df_global_temp = t_pert_df_global[t_pert_df_global['Perturbation family'] == family_name].copy(deep=True)
+     t_pert_df_global_temp['category'] = 'Overall'
+
+     t_pert_fig.add_trace(px.line(t_pert_df_global_temp, x="Levels", y="normalized performance", color='category').data[0])
+     t_pert_fig.update_xaxes(tickmode='linear', dtick=1)
+
+     st.write(f'The following plot illustrates the normalized performance of the model across different categories for the perturbation family: {family_name}.')
+     st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)
+     st.markdown("---")