lterriel committed on
Commit
f832cf5
·
1 Parent(s): b4ed97c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -22
app.py CHANGED
@@ -100,23 +100,23 @@ if option == "Inter-Annotator Agreement results":
100
  pairs = list(combinations(project_analyzed.annotations_per_coders, 2))
101
 
102
  # Display in app
103
- cont_kappa = st.container()
104
- cont_kappa.title("Inter-Annotator Agreement (IAA) results")
105
- tab1, tab2, tab3, tab4, tab5 = st.tabs(
106
- ["πŸ“ˆ IAA metrics", "πŸ—ƒ IAA Metrics Legend", "βœ”οΈ Agree annotations", "❌ Disagree annotations",
107
- "🏷️ Global Labels Statistics"])
108
- tab1.subheader("Fleiss Kappa (global score for group):")
109
 
110
 
111
 
112
- tab1.markdown(interpret_kappa(round(fleiss_kappa_function(matrix), 2)), unsafe_allow_html=True)
113
- tab1.subheader("Cohen Kappa Annotators Matrix (score between annotators):")
114
  # tab1.dataframe(df)
115
  data = []
116
  for coder_1, coder_2 in pairs:
117
  cohen_function = cohen_kappa_function(project_analyzed.labels_per_coder[coder_1], project_analyzed.labels_per_coder[coder_2])
118
  data.append(((coder_1, coder_2), cohen_function))
119
- tab1.markdown(f"* {coder_1} <> {coder_2} : {interpret_kappa(cohen_function)}", unsafe_allow_html=True)
120
  # print(f"* {coder_1} <> {coder_2} : {cohen_function}")
121
 
122
  intermediary = defaultdict(Counter)
@@ -132,13 +132,13 @@ if option == "Inter-Annotator Agreement results":
132
  mask = df_cm.values == 0
133
  sn.set(font_scale=0.7) # for label size
134
  colors = ["#e74c3c", "#f39c12", "#f4d03f", "#5dade2", "#58d68d", "#28b463"]
135
- width = tab1.slider("matrix width", 1, 10, 14)
136
- height = tab1.slider("matrix height", 1, 10, 4)
137
  fig, ax = pylt.subplots(figsize=(width, height))
138
  sn.heatmap(df_cm, cmap=colors, annot=True, mask=mask, annot_kws={"size": 7}, vmin=0, vmax=1, ax=ax) # font size
139
  # plt.show()
140
- tab1.pyplot(ax.figure)
141
- tab2.markdown("""
142
  <table>
143
  <thead>
144
  <tr>
@@ -217,16 +217,16 @@ if option == "Inter-Annotator Agreement results":
217
 
218
  csv_agree = convert_df(df_agree)
219
 
220
- tab3.subheader("Total agree annotations:")
221
- tab3.markdown(f"{total_unanime} / {len(df)} annotations ({round((total_unanime / len(df)) * 100, 2)} %)")
222
- tab3.download_button(
223
  "Press to Download CSV",
224
  csv_agree,
225
  "csv_annotators_agree.csv",
226
  "text/csv",
227
  key='download-csv-1'
228
  )
229
- tab3.dataframe(df_agree)
230
 
231
 
232
  ## Disagree part
@@ -238,17 +238,17 @@ if option == "Inter-Annotator Agreement results":
238
  df_disagree = df[df[columns_to_compare].apply(lambda row: check_all_not_equal(row), axis=1)]
239
  total_desaccord = len(df_disagree)
240
  csv_disagree = convert_df(df_disagree)
241
- tab4.subheader("Total disagree annotations:")
242
- tab4.markdown(
243
  f"{total_desaccord} / {len(df)} annotations ({round((total_desaccord / len(df)) * 100, 2)} %)")
244
- tab4.download_button(
245
  "Press to Download CSV",
246
  csv_disagree,
247
  "csv_annotators_disagree.csv",
248
  "text/csv",
249
  key='download-csv-2'
250
  )
251
- tab4.dataframe(df_disagree)
252
 
253
 
254
  ## alignement chart labels
@@ -299,7 +299,7 @@ if option == "Inter-Annotator Agreement results":
299
  return fig
300
 
301
  f = plot_pies(to_pie)
302
- tab5.pyplot(f.figure)
303
 
304
  # global project results view
305
 
 
100
  pairs = list(combinations(project_analyzed.annotations_per_coders, 2))
101
 
102
  # Display in app
103
+ #cont_kappa = st.container()
104
+ st.title("Inter-Annotator Agreement (IAA) results")
105
+ #tab1, tab2, tab3, tab4, tab5 = st.tabs(
106
+ # ["πŸ“ˆ IAA metrics", "πŸ—ƒ IAA Metrics Legend", "βœ”οΈ Agree annotations", "❌ Disagree annotations",
107
+ # "🏷️ Global Labels Statistics"])
108
+ st.subheader("Fleiss Kappa (global score for group):")
109
 
110
 
111
 
112
+ st.markdown(interpret_kappa(round(fleiss_kappa_function(matrix), 2)), unsafe_allow_html=True)
113
+ st.subheader("Cohen Kappa Annotators Matrix (score between annotators):")
114
  # tab1.dataframe(df)
115
  data = []
116
  for coder_1, coder_2 in pairs:
117
  cohen_function = cohen_kappa_function(project_analyzed.labels_per_coder[coder_1], project_analyzed.labels_per_coder[coder_2])
118
  data.append(((coder_1, coder_2), cohen_function))
119
+ st.markdown(f"* {coder_1} <> {coder_2} : {interpret_kappa(cohen_function)}", unsafe_allow_html=True)
120
  # print(f"* {coder_1} <> {coder_2} : {cohen_function}")
121
 
122
  intermediary = defaultdict(Counter)
 
132
  mask = df_cm.values == 0
133
  sn.set(font_scale=0.7) # for label size
134
  colors = ["#e74c3c", "#f39c12", "#f4d03f", "#5dade2", "#58d68d", "#28b463"]
135
+ width = st.slider("matrix width", 1, 10, 14)
136
+ height = st.slider("matrix height", 1, 10, 4)
137
  fig, ax = pylt.subplots(figsize=(width, height))
138
  sn.heatmap(df_cm, cmap=colors, annot=True, mask=mask, annot_kws={"size": 7}, vmin=0, vmax=1, ax=ax) # font size
139
  # plt.show()
140
+ st.pyplot(ax.figure)
141
+ st.markdown("""
142
  <table>
143
  <thead>
144
  <tr>
 
217
 
218
  csv_agree = convert_df(df_agree)
219
 
220
+ st.subheader("Total agree annotations:")
221
+ st.markdown(f"{total_unanime} / {len(df)} annotations ({round((total_unanime / len(df)) * 100, 2)} %)")
222
+ st.download_button(
223
  "Press to Download CSV",
224
  csv_agree,
225
  "csv_annotators_agree.csv",
226
  "text/csv",
227
  key='download-csv-1'
228
  )
229
+ st.dataframe(df_agree)
230
 
231
 
232
  ## Disagree part
 
238
  df_disagree = df[df[columns_to_compare].apply(lambda row: check_all_not_equal(row), axis=1)]
239
  total_desaccord = len(df_disagree)
240
  csv_disagree = convert_df(df_disagree)
241
+ st.subheader("Total disagree annotations:")
242
+ st.markdown(
243
  f"{total_desaccord} / {len(df)} annotations ({round((total_desaccord / len(df)) * 100, 2)} %)")
244
+ st.download_button(
245
  "Press to Download CSV",
246
  csv_disagree,
247
  "csv_annotators_disagree.csv",
248
  "text/csv",
249
  key='download-csv-2'
250
  )
251
+ st.dataframe(df_disagree)
252
 
253
 
254
  ## alignement chart labels
 
299
  return fig
300
 
301
  f = plot_pies(to_pie)
302
+ st.pyplot(f.figure)
303
 
304
  # global project results view
305