# %%
# TODOS: Plots with plotly
import json
import os
import urllib.request

import pandas as pd
import plotly.express as px
import streamlit as st

from config import other_info_dict
from utils import *  # provides ResultsProcessor, among others
# %%
st.title("Microsoft Phi-2 LLM assessment")
# st.image('model_card.png', caption='Hugging face description', use_column_width=True)
st.write("""
    Microsoft Phi-2 (https://huggingface.co/microsoft/phi-2) is a Transformer model with 2.7 billion parameters. Performance on benchmarks for common sense, language understanding, and logical reasoning is nearly state-of-the-art among models with less than 13 billion parameters. Unlike typical Large Language Models (LLM), Phi-2 has not been fine-tuned through reinforcement learning from human feedback.""")

# Location (URL prefix) of the post-processed result JSON files.
prefix_post_processing = os.environ["POST_PROCESSING_JSON"]


st.header('Evaluation dataset')
st.write(other_info_dict['data_description'])


# %%
st.header("Prompt")
st.write("For each data point in the evaluation dataset, we create a prompt for LLM by adding the context and the question to the below prompt template, while following the same structure of the prompt template.")


prompt_options_dict = {
    'Prompt option 0 (with typos and grammatical errors, two shot prompting)': 'prompt_option_0.json',
    'Prompt option 1 (Zero shot prompting)': 'prompt_option_1.json',
    'Prompt option 2 (Prompt option 1 with two shot prompting)': 'prompt_option_2.json',
    'Prompt option 3 (Prompt option 0 with minor fixes)': 'prompt_option_3.json'
}

prompt_options_list = list(prompt_options_dict.keys())

options = st.multiselect(
    'Select prompts:',
    prompt_options_list,
    [prompt_options_list[0]])

# st.write('Selected prompts:')
# for t_opt in options:
#     st.write(t_opt)
prompt_options_nums_list = []
st.markdown("---")
for t_opt in options:
    st.write(t_opt)
    prompt_option = int(prompt_options_dict[t_opt].split('_')[-1].split('.')[0])
    with open(f'prompt_{prompt_option}.txt', "r") as file:
        file_contents = file.read()
    # st.write(file_contents)
    st.text_area("Prompt template:", value=file_contents, height=100)
    st.markdown("---")
    prompt_options_nums_list += [prompt_option]

result_processor_obj_dict = {}
result_file_dict = {}
data_dicts_dict = {}
for t_result_file in options:
    result_file = prefix_post_processing + prompt_options_dict[t_result_file]

    prompt_option = int(prompt_options_dict[t_result_file].split('_')[-1].split('.')[0])

    with urllib.request.urlopen(result_file) as url:
        data_dict = json.load(url)

    result_processor_obj_dict[t_result_file] = ResultsProcessor(
        prompt_option=prompt_option,
        result_file=result_file,
        data_dict=data_dict
    )
    data_dicts_dict[t_result_file] = data_dict
    result_file_dict[t_result_file] = result_file


print(result_processor_obj_dict)  # log the constructed processors for debugging


# Earlier single-prompt flow, kept for reference:
# t_result_file = st.selectbox(
#      'Select the prompt:',
#      list(prompt_options_dict.keys()))
#
# result_file = prefix_post_processing + prompt_options_dict[t_result_file]
#
# prompt_option = int(prompt_options_dict[t_result_file].split('_')[-1].split('.')[0])
#
# with urllib.request.urlopen(result_file) as url:
#     data_dict = json.load(url)
#
# # File uploader
# with open(f'prompt_{prompt_option}.txt', "r") as file:
#     file_contents = file.read()
#     # st.write(file_contents)
# st.text_area("Prompt template:", value=file_contents, height=300)
st.write("The answer to the question is obtained by post-processing the output of the LLM, wherein any additional content starting from the first 'Context: ' is disregarded.")
st.write("In the case that the LLM answers <NO ANSWER>, the output is set to an empty string.")

# 'Context: ' + context + '\n\n' + 'Question: ' + t_question + '\n\n' + 'Answer:'
# %%

st.header('Performance metric')
st.write("""The performance metric used is an estimation of the percentage of correctly answered questions, i.e. the output of the model coincides with one of the ground truth answers. The performance metric can also be interpreted as the probability that the model correctly answers a question. The performance of the model is evaluated with the exact match accuracy metric (see compute_exact function in SQuAD2.0 official evaluation script [here](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/)), taking values in [0,1], where 0 is worst (model always wrong), and 1 is best (model always correct). It is the number of correctly answered questions divided by the number of data points. An answer is considered to be correctly answered (by the model), if the predicted answer after normalization (text is converted to lowercase, and punctuation, articles and extra whitespace are removed) matches exactly with any of the normalized ground truth answers. In the case of unanswerable questions, the empty string is considered to be the only ground truth answer.""")
with st.container():
    
    overall_performance_list = []

    for t_opt in options:
        overall_performance_list += [result_processor_obj_dict[t_opt].get_overall_performance()]
    
    t_perf_overall_df = pd.DataFrame({
        'Prompt' : options,
        'Overall performance': overall_performance_list
    })
    st.dataframe(t_perf_overall_df.set_index(t_perf_overall_df.columns[0]))


    perf_dict = t_perf_overall_df['Overall performance'].describe().round(2).to_dict()
    
    if len(options) > 1:
        st.subheader('Statistics of the performance metric across the selected prompts')
        st.write('Mean: ', perf_dict['mean'])
        st.write('Standard deviation: ', perf_dict['std'])
        st.write('Minimum: ', perf_dict['min'])
        st.write('Maximum: ', perf_dict['max'])



# %%
st.header("Bias ratios")
st.write('The bias ratio is defined as the ratio of the lowest to the highest performance among the categories of a characteristic that have sufficient data (more than 50 data points). The following table shows the bias ratio for each of the considered characteristics.')
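
# Minimal sketch (not used by the app) of the bias ratio defined above: lowest
# category performance divided by highest, over categories with more than 50 data
# points. The actual values come from ResultsProcessor.get_bias_ratios_df.
def bias_ratio(category_performance: dict, category_counts: dict, min_points: int = 50) -> float:
    eligible = [perf for cat, perf in category_performance.items() if category_counts.get(cat, 0) > min_points]
    return min(eligible) / max(eligible) if eligible and max(eligible) > 0 else float('nan')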

processed_t_opt_dict = {}
ch_df = None
for t_opt in options:
    processed_t_opt = t_opt.split('(')[0].strip()
    processed_t_opt_dict[t_opt] = processed_t_opt
    if ch_df is None:
        ch_df = result_processor_obj_dict[t_opt].get_bias_ratios_df()
        ch_df[processed_t_opt] = ch_df['Bias ratio'].values
    else:
        t_ch_df = result_processor_obj_dict[t_opt].get_bias_ratios_df()
        # All prompts are expected to list the same characteristics in the same order.
        assert (ch_df['Characteristic'].values == t_ch_df['Characteristic'].values).all()
        ch_df[processed_t_opt] = t_ch_df['Bias ratio'].values

ch_df.drop(columns=['Bias ratio'], inplace=True)


ch_df.set_index(ch_df.columns[0], inplace=True)
# ch_df = result_processor_obj_dict[t_result_file].get_bias_ratios_df()

with st.container():
    st.dataframe(ch_df) # Todo: MAX MIN HIGHLIGHT

means_list = []
stds_list = []
mins_list = []
maxs_list = []
for i in ch_df.index:
    # Summary statistics of the bias ratio for this characteristic across the selected prompts.
    statistics = ch_df.loc[i].describe().loc[['mean', 'std', 'min', 'max']].to_dict()
    means_list += [statistics['mean']]
    stds_list += [statistics['std']]
    mins_list += [statistics['min']]
    maxs_list += [statistics['max']]

t_ch_df = pd.DataFrame({
    'Mean': means_list,
    'Standard deviation' : stds_list,
    'Minimum': mins_list,
    'Maximum': maxs_list
})
t_ch_df.index = ch_df.index
if len(options) > 1:
    with st.container():
        st.dataframe(t_ch_df) 


# %%
st.header("Robustness")

st.write(f"""We evaluate the robustness of the LLM by assessing the variation in performance when perturbations are introduced to the content outside of the prompt template. The following plot shows the performance across different levels of perturbation within a perturbation family that consists of a series of perturbation methods. We consider the following perturbation families.

- ProbTypos: {other_info_dict['ProbTypos_description']}

- MaxTypo: {other_info_dict['MaxTypo_description']}
""")

# st.write(f"ProbTypos: {other_info_dict['ProbTypos_description']}")
# st.write(f"MaxTypo: {other_info_dict['MaxTypo_description']}")

for t_opt in options:
    st.write('Prompt used: ', t_opt)
    t_pert_df_global = result_processor_obj_dict[t_opt].get_global_perturbers_df()

    t_pert_fig = px.line(t_pert_df_global, x="Levels", y="Performance", color='Perturbation family')
    t_pert_fig.update_xaxes(tickmode='linear', dtick=1)


    st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)


# %%
st.header("Characteristic results")

# Categories are assumed to be identical across prompts, so read them from the first selected prompt.
embedder_categories = data_dicts_dict[options[0]]['Embedder categories']

option = st.selectbox(
     'Select characteristic:',
     sorted(list(embedder_categories.keys())))


st.write('The following are the categories:')
st.write(', '.join(embedder_categories[option]))

if 'Length' in option:
    st.write("Note: Here, length denotes the number of characters. ")

if 'gender' in option:
    st.write(other_info_dict['gender_categories_text'])

if 'ethnicity' in option:
    st.write(other_info_dict['ethnicity_categories_text'])


t_df = result_processor_obj_dict[options[0]].get_data_distribution(option)

fig = px.bar(t_df, x='Category', y='Number of points')

st.plotly_chart(fig, theme="streamlit", use_container_width=True)
st.markdown("---")



st.write("The performance metric for each category is shown across the selected prompts.")
t_fair_dfs_list = []
for t_opt in options:
    t_fair_df = result_processor_obj_dict[t_opt].get_fairness_confidence_interval_df(option)
    t_fair_df['Prompt'] = processed_t_opt_dict[t_opt]
    t_fair_dfs_list += [t_fair_df]

merged_t_fair_df = pd.concat(t_fair_dfs_list, axis=0)


fig_fair =  px.scatter(merged_t_fair_df, x='Category', y='Estimate', color='Prompt', symbol='Prompt')

# fig_fair = None

fig_fair.update_layout(yaxis_title="Performance in %")

st.plotly_chart(fig_fair, theme="streamlit", use_container_width=True)


st.markdown("---")
st.write("The performance metric is shown together with 95% confidence intervals for each category, across the selected prompts.")

temp_options = st.multiselect(
    'Choose from your pre-selected prompts:',
    options,
    [options[0]])

t_fair_dfs_list = []
for t_opt in temp_options:
    t_fair_df = result_processor_obj_dict[t_opt].get_fairness_confidence_interval_df(option)
    t_fair_df['Prompt'] = processed_t_opt_dict[t_opt]
    t_fair_dfs_list += [t_fair_df]

merged_t_fair_df = pd.concat(t_fair_dfs_list, axis=0)


fig_fair =  px.scatter(merged_t_fair_df, x='Category', y='Estimate', error_y='Diff upper', error_y_minus='Diff lower', color='Prompt', symbol='Prompt')

# fig_fair = None

fig_fair.update_layout(yaxis_title="Performance in %")

st.plotly_chart(fig_fair, theme="streamlit", use_container_width=True)
st.markdown("---")

st.write('The following plots show the normalized average performance for each category of a characteristic, at each level of perturbation, starting with no perturbation. Each curve represents the normalized average performance on a category, i.e. the average performance at every level of perturbation divided by the average performance without perturbation.')
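
# Minimal sketch (not used by the app) of the normalization described above: the
# average performance at each perturbation level divided by the average performance
# at level 0 (no perturbation).
def normalize_performance(performance_by_level: list) -> list:
    baseline = performance_by_level[0]
    return [perf / baseline if baseline else float('nan') for perf in performance_by_level]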

temp_options_2 = st.multiselect(
    'Choose from your pre-selected prompts:',
    options,
    [options[0]], key='Performance Robustness')

st.markdown("---")
for t_opt in temp_options_2:
    results_pert_rob_dict = result_processor_obj_dict[t_opt].get_performance_robustness(option)
    merged_dfs_list = results_pert_rob_dict['merged_dfs_list']
    t_pert_df_global_temps_list = results_pert_rob_dict['t_pert_df_global_temps_list']
    family_names_list = results_pert_rob_dict['family_names_list']

    for merged_df, t_pert_df_global_temp, family_name in zip(merged_dfs_list, t_pert_df_global_temps_list, family_names_list):
        title_name = 'Perturbation family: ' + family_name + '\n\nPrompt: ' + t_opt
        t_pert_fig = px.line(merged_df, x="Levels", y="normalized performance", color='category')
        t_pert_fig.update_layout(yaxis_title="Normalized performance")
        
        t_pert_fig.add_trace(px.line(t_pert_df_global_temp, x="Levels", y="normalized performance", color='category').data[0])
        t_pert_fig.update_xaxes(tickmode='linear')
        # t_pert_fig.update_layout(title=title_name)



        # st.write(f'The following plot illustrates the normalized performance of the model across different categories for the perturbation family: {family_name}.')
        st.write(title_name)
        st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)
        st.markdown("---")