pleonova committed
Commit 9aab71d
2 Parent(s): 9bdc126 3d41bbf

Merge pull request #2 from pleonova/multiple-texts

Files changed (1)
  1. app.py +289 -122
app.py CHANGED
@@ -18,49 +18,112 @@ ex_long_text = example_long_text_load()
 
 
 # if __name__ == '__main__':
+###################################
+######## App Description ##########
+###################################
 st.markdown("### Long Text Summarization & Multi-Label Classification")
-st.write("This app summarizes and then classifies your long text with multiple labels using [BART Large MNLI](https://huggingface.co/facebook/bart-large-mnli). The keywords are generated using [KeyBERT](https://github.com/MaartenGr/KeyBERT).")
-st.write("__Inputs__: User enters their own custom text and labels.")
-st.write("__Outputs__: A summary of the text, likelihood percentages for each label and a downloadable csv of the results. \
+st.write("This app summarizes and then classifies your long text(s) with multiple labels using [BART Large MNLI](https://huggingface.co/facebook/bart-large-mnli). The keywords are generated using [KeyBERT](https://github.com/MaartenGr/KeyBERT).")
+st.write("__Inputs__: User enters their own custom text(s) and labels.")
+st.write("__Outputs__: A summary of the text, likelihood match score for each label and a downloadable csv of the results. \
     Includes additional options to generate a list of keywords and/or evaluate results against a list of ground truth labels, if available.")
 
+
+
+###################################
+######## Example Input  ##########
+###################################
 example_button = st.button(label='See Example')
 if example_button:
     example_text = ex_long_text #ex_text
     display_text = 'Excerpt from Frankenstein:' + example_text + '"\n\n' + "[This is an excerpt from Project Gutenberg's Frankenstein. " + ex_license + "]"
     input_labels = ex_labels
     input_glabels = ex_glabels
+    title_name = 'Frankenstein, Chapter 3'
 else:
     display_text = ''
     input_labels = ''
     input_glabels = ''
+    title_name = 'Submitted Text'
+
 
 
 with st.form(key='my_form'):
+    ###################################
+    ######## Form: Step 1  ##########
+    ###################################
+    st.markdown("##### Step 1: Upload Text")
     text_input = st.text_area("Input any text you want to summarize & classify here (keep in mind very long text will take a while to process):", display_text)
-
+
+    text_csv_expander = st.expander(label=f'Want to upload multiple texts at once? Expand to upload your text files below.', expanded=False)
+    with text_csv_expander:
+        st.markdown('##### Choose one of the options below:')
+        st.write("__Option A:__")
+        uploaded_text_files = st.file_uploader(label="Upload file(s) that end with the .txt suffix",
+                                               accept_multiple_files=True, key = 'text_uploader',
+                                               type='txt')
+        st.write("__Option B:__")
+        uploaded_csv_text_files = st.file_uploader(label='Upload a CSV file with two columns: "title" and "text"',
+                                                   accept_multiple_files=False, key = 'csv_text_uploader',
+                                                   type='csv')
+
+    if text_input == display_text and display_text != '':
+        text_input = example_text
+
     gen_keywords = st.radio(
-        "Generate keywords from text?",
+        "Generate keywords from text? (independent from the input labels below)",
         ('Yes', 'No')
     )
 
-    if text_input == display_text and display_text != '':
-        text_input = example_text
+    gen_summary = st.radio(
+        "Generate summary from text? (recommended for label matching below, but will take longer)",
+        ('Yes', 'No')
+    )
 
-    labels = st.text_input('Enter possible topic labels, which can be either keywords and/or general themes (comma-separated):',input_labels, max_chars=1000)
+    ###################################
+    ######## Form: Step 2  ##########
+    ###################################
+    st.write('\n')
+    st.markdown("##### Step 2: Enter Labels")
+    labels = st.text_input('Enter possible topic labels, which can be either keywords and/or general themes (comma-separated):',input_labels, max_chars=2000)
     labels = list(set([x.strip() for x in labels.strip().split(',') if len(x.strip()) > 0]))
-
-    glabels = st.text_input('If available, enter ground truth topic labels to evaluate results, otherwise leave blank (comma-separated):',input_glabels, max_chars=1000)
+
+    labels_csv_expander = st.expander(label=f'Prefer to upload a list of labels instead? Click here to upload your CSV file.',expanded=False)
+    with labels_csv_expander:
+        uploaded_labels_file = st.file_uploader("Choose a CSV file with one column and no header, where each cell is a separate label",
+                                                key='labels_uploader')
+
+    ###################################
+    ######## Form: Step 3  ##########
+    ###################################
+    st.write('\n')
+    st.markdown("##### Step 3: Provide Ground Truth Labels (_Optional_)")
+    glabels = st.text_input('If available, enter ground truth topic labels to evaluate results, otherwise leave blank (comma-separated):',input_glabels, max_chars=2000)
     glabels = list(set([x.strip() for x in glabels.strip().split(',') if len(x.strip()) > 0]))
 
-    threshold_value = st.slider(
-        'Select a threshold cutoff for matching percentage (used for ground truth label evaluation)',
-        0.0, 1.0, (0.5))
+
+    glabels_csv_expander = st.expander(label=f'Have a file with labels for the text? Click here to upload your CSV file.', expanded=False)
+    with glabels_csv_expander:
+        st.markdown('##### Choose one of the options below:')
+        st.write("__Option A:__")
+        uploaded_onetext_glabels_file = st.file_uploader("Single Text: Choose a CSV file with one column and no header, where each cell is a separate label",
+                                                         key = 'onetext_glabels_uploader')
+        st.write("__Option B:__")
+        uploaded_multitext_glabels_file = st.file_uploader('Multiple Text: Choose a CSV file with two columns "title" and "label", with the cells in the title column matching the name of the files uploaded in step #1.',
+                                                           key = 'multitext_glabels_uploader')
+
+
+    # threshold_value = st.slider(
+    #     'Select a threshold cutoff for matching percentage (used for ground truth label evaluation)',
+    #     0.0, 1.0, (0.5))
 
     submit_button = st.form_submit_button(label='Submit')
 
 st.write("_For improvments/suggestions, please file an issue here: https://github.com/pleonova/multi-label-summary-text_")
 
+
+###################################
+####### Model Load Time #########
+###################################
 with st.spinner('Loading pretrained models...'):
     start = time.time()
     summarizer = md.load_summary_model()
@@ -74,126 +137,230 @@ with st.spinner('Loading pretrained models...'):
     kw_model = md.load_keyword_model()
     k_time = round(time.time() - start,4)
 
-st.success(f'Time taken to load various models: {k_time}s for KeyBERT model & {s_time}s for BART summarizer mnli model & {c_time}s for BART classifier mnli model.')
+st.spinner(f'Time taken to load various models: {k_time}s for KeyBERT model & {s_time}s for BART summarizer mnli model & {c_time}s for BART classifier mnli model.')
+# st.success(None)
 
 
 if submit_button or example_button:
-    if len(text_input) == 0:
+    ###################################
+    ######## Load Text Data #######
+    ###################################
+    if len(text_input) == 0 and uploaded_text_files is None and uploaded_csv_text_files is None:
         st.error("Enter some text to generate a summary")
     else:
-        with st.spinner('Breaking up text into more reasonable chunks (tranformers cannot exceed a 1024 token max)...'):
+
+        if len(text_input) != 0:
+            text_df = pd.DataFrame.from_dict({'title': [title_name], 'text': [text_input]})
+
+        # OPTION A
+        elif uploaded_text_files is not None:
+            st.markdown("### Text Inputs")
+            st.write('Files concatenated into a dataframe:')
+            file_names = []
+            raw_texts = []
+            for uploaded_file in uploaded_text_files:
+                text = str(uploaded_file.read(), "utf-8")
+                raw_texts.append(text)
+                title_file_name = uploaded_file.name.replace('.txt','')
+                file_names.append(title_file_name)
+            text_df = pd.DataFrame({'title': file_names,
+                                    'text': raw_texts})
+            st.dataframe(text_df.head())
+            st.download_button(
+                label="Download data as CSV",
+                data=text_df.to_csv().encode('utf-8'),
+                file_name='title_text.csv',
+                mime='title_text/csv',
+            )
+        # OPTION B
+        elif uploaded_csv_text_files is not None:
+            text_df = pd.read_csv(uploaded_csv_text_files)
+
+        # Which input was used? If text area was used, ignore the 'title'
+        if len(text_input) != 0:
+            title_element = []
+        else:
+            title_element = ['title']
+
+
+        ###################################
+        ######## Text Chunks ##########
+        ###################################
+        with st.spinner('Breaking up text into more reasonable chunks (transformers cannot exceed a 1024 token max)...'):
             # For each body of text, create text chunks of a certain token size required for the transformer
-            nested_sentences = md.create_nest_sentences(document = text_input, token_max_length = 1024)
-            # For each chunk of sentences (within the token max)
-            text_chunks = []
-            for n in range(0, len(nested_sentences)):
-                tc = " ".join(map(str, nested_sentences[n]))
-                text_chunks.append(tc)
-
-        if gen_keywords == 'Yes':
-            st.markdown("### Top Keywords")
-            with st.spinner("Generating keywords from text..."):
-
-                kw_df = pd.DataFrame()
-                for text_chunk in text_chunks:
-                    keywords_list = md.keyword_gen(kw_model, text_chunk)
-                    kw_df = kw_df.append(pd.DataFrame(keywords_list))
-                kw_df.columns = ['keyword', 'score']
-                top_kw_df = kw_df.groupby('keyword')['score'].max().reset_index()
-
-                top_kw_df = top_kw_df.sort_values('score', ascending = False).reset_index().drop(['index'], axis=1)
-                st.dataframe(top_kw_df.head(10))
+
+            text_chunks_lib = dict()
+            for i in range(0, len(text_df)):
+                nested_sentences = md.create_nest_sentences(document=text_df['text'][i], token_max_length=1024)
+
+                # For each chunk of sentences (within the token max)
+                text_chunks = []
+                for n in range(0, len(nested_sentences)):
+                    tc = " ".join(map(str, nested_sentences[n]))
+                    text_chunks.append(tc)
+                title_entry = text_df['title'][i]
+                text_chunks_lib[title_entry] = text_chunks
+
+
+        ################################
+        ######## Keywords ##########
+        ################################
+        if gen_keywords == 'Yes':
+            st.markdown("### Top Keywords")
+            with st.spinner("Generating keywords from text..."):
+
+                kw_dict = dict()
+                text_chunk_counter = 0
+                for key in text_chunks_lib:
+                    keywords_list = []
+                    for text_chunk in text_chunks_lib[key]:
+                        text_chunk_counter += 1
+                        keywords_list += md.keyword_gen(kw_model, text_chunk)
+                    kw_dict[key] = dict(keywords_list)
+                # Display as a dataframe
+                kw_df0 = pd.DataFrame.from_dict(kw_dict).reset_index()
+                kw_df0.rename(columns={'index': 'keyword'}, inplace=True)
+                kw_df = pd.melt(kw_df0, id_vars=['keyword'], var_name='title', value_name='score').dropna()
+
+                kw_column_list = ['keyword', 'score']
+                kw_df = kw_df[kw_df['score'] > 0.25][title_element + kw_column_list].sort_values(title_element + ['score'], ascending=False).reset_index().drop(columns='index')
+
+                st.dataframe(kw_df)
+                st.download_button(
+                    label="Download data as CSV",
+                    data=kw_df.to_csv().encode('utf-8'),
+                    file_name='title_keywords.csv',
+                    mime='title_keywords/csv',
+                )
+
 
+        ###################################
+        ########## Summarize ##########
+        ###################################
+        if gen_summary == 'Yes':
             st.markdown("### Summary")
-        with st.spinner(f'Generating summaries for {len(text_chunks)} text chunks (this may take a minute)...'):
-
-            my_expander = st.expander(label=f'Expand to see intermediate summary generation details for {len(text_chunks)} text chunks')
-            with my_expander:
-                summary = []
-
-                st.markdown("_Once the original text is broken into smaller chunks (totaling no more than 1024 tokens, \
-                with complete setences), each block of text is then summarized separately using BART NLI \
-                and then combined at the very end to generate the final summary._")
-
-                for num_chunk, text_chunk in enumerate(text_chunks):
-                    st.markdown(f"###### Original Text Chunk {num_chunk+1}/{len(text_chunks)}" )
-                    st.markdown(text_chunk)
-
-                    chunk_summary = md.summarizer_gen(summarizer, sequence=text_chunk, maximum_tokens = 300, minimum_tokens = 20)
-                    summary.append(chunk_summary)
-                    st.markdown(f"###### Partial Summary {num_chunk+1}/{len(text_chunks)}")
-                    st.markdown(chunk_summary)
-                # Combine all the summaries into a list and compress into one document, again
-                final_summary = " \n\n".join(list(summary))
-
-            st.markdown(final_summary)
-
-        if len(text_input) == 0 or len(labels) == 0:
+            with st.spinner(f'Generating summaries for {len(text_df)} texts consisting of a total of {text_chunk_counter} chunks (this may take a minute)...'):
+                sum_dict = dict()
+                for i, key in enumerate(text_chunks_lib):
+                    with st.expander(label=f'({i+1}/{len(text_df)}) Expand to see intermediate summary generation details for: {key}', expanded=False):
+                        # for key in text_chunks_lib:
+                        summary = []
+                        for num_chunk, text_chunk in enumerate(text_chunks_lib[key]):
+                            chunk_summary = md.summarizer_gen(summarizer, sequence=text_chunk, maximum_tokens=300, minimum_tokens=20)
+                            summary.append(chunk_summary)
+
+                            st.markdown(f"###### Original Text Chunk {num_chunk+1}/{len(text_chunks)}" )
+                            st.markdown(text_chunk)
+                            st.markdown(f"###### Partial Summary {num_chunk+1}/{len(text_chunks)}")
+                            st.markdown(chunk_summary)
+
+                        # Combine all the summaries into a list and compress into one document, again
+                        final_summary = "\n\n".join(list(summary))
+                        sum_dict[key] = [final_summary]
+
+                sum_df = pd.DataFrame.from_dict(sum_dict).T.reset_index()
+                sum_df.columns = ['title', 'summary_text']
+                # TO DO: Make sure summary_text does not exceed the token length
+
+                st.dataframe(sum_df)
+                st.download_button(
+                    label="Download data as CSV",
+                    data=sum_df.to_csv().encode('utf-8'),
+                    file_name='title_summary.csv',
+                    mime='title_summary/csv',
+                )
+
+        ###################################
+        ########## Classifier #########
+        ###################################
+        if ((len(text_input) == 0 and uploaded_text_files is None and uploaded_csv_text_files is None)
+                or (len(labels) == 0 and uploaded_labels_file is None)):
             st.error('Enter some text and at least one possible topic to see label predictions.')
         else:
-            st.markdown("### Top Label Predictions on Summary vs Full Text")
-            with st.spinner('Matching labels...'):
-                topics, scores = md.classifier_zero(classifier, sequence=final_summary, labels=labels, multi_class=True)
-                # st.markdown("### Top Label Predictions: Combined Summary")
-                # plot_result(topics[::-1][:], scores[::-1][:])
-                # st.markdown("### Download Data")
-                data = pd.DataFrame({'label': topics, 'scores_from_summary': scores})
-                # st.dataframe(data)
-                # coded_data = base64.b64encode(data.to_csv(index = False). encode ()).decode()
-                # st.markdown(
-                #     f'<a href="data:file/csv;base64, {coded_data}" download = "data.csv">Download Data</a>',
-                #     unsafe_allow_html = True
-                # )
-
-                topics_ex_text, scores_ex_text = md.classifier_zero(classifier, sequence=text_input, labels=labels, multi_class=True)
-                plot_dual_bar_chart(topics, scores, topics_ex_text, scores_ex_text)
-
-                data_ex_text = pd.DataFrame({'label': topics_ex_text, 'scores_from_full_text': scores_ex_text})
-
-                data2 = pd.merge(data, data_ex_text, on = ['label'])
-
+            if gen_summary == 'Yes':
+                st.markdown("### Top Label Predictions on Summary vs Full Text")
+            else:
+                st.markdown("### Top Label Predictions on Full Text")
+
+            if uploaded_labels_file is not None:
+                labels_df = pd.read_csv(uploaded_labels_file, header=None)
+                label_list = labels_df.iloc[:, 0]
+            else:
+                label_list = labels
+
+            with st.spinner('Matching labels...(may take some time)'):
+                if gen_summary == 'Yes':
+                    labels_sum_col_list = ['title', 'label', 'scores_from_summary']
+                    labels_sum_df = pd.DataFrame(columns=labels_sum_col_list)
+
+                labels_full_col_list = ['title', 'label', 'scores_from_full_text']
+                labels_full_df = pd.DataFrame(columns=labels_full_col_list)
+
+                for i in range(0, len(text_df)):
+                    if gen_summary == 'Yes':
+                        s_topics, s_scores = md.classifier_zero(classifier, sequence=sum_df['summary_text'][i], labels=label_list, multi_class=True)
+                        ls_df = pd.DataFrame({'label': s_topics, 'scores_from_summary': s_scores})
+                        ls_df['title'] = text_df['title'][i]
+                        labels_sum_df = pd.concat([labels_sum_df, ls_df[labels_sum_col_list]])
+
+                    f_topics, f_scores = md.classifier_zero(classifier, sequence=text_df['text'][i], labels=label_list, multi_class=True)
+                    lf_df = pd.DataFrame({'label': f_topics, 'scores_from_full_text': f_scores})
+                    lf_df['title'] = text_df['title'][i]
+                    labels_full_df = pd.concat([labels_full_df, lf_df[labels_full_col_list]])
+
+                    with st.expander(f'({i+1}/{len(text_df)}) See intermediate label matching results for: {text_df["title"][i]}'):
+                        if gen_summary == 'Yes':
+                            st.dataframe(pd.merge(ls_df, lf_df, on=['title','label']))
+                        else:
+                            st.dataframe(lf_df)
+
+                if gen_summary == 'Yes':
+                    label_match_df = pd.merge(labels_sum_df, labels_full_df, on=['title', 'label'])
+                else:
+                    label_match_df = labels_full_df.copy()
+
+                ###################################
+                ####### Ground Truth Labels ######
+                ###################################
                 if len(glabels) > 0:
                     gdata = pd.DataFrame({'label': glabels})
-                    gdata['is_true_label'] = int(1)
-
-                    data2 = pd.merge(data2, gdata, how = 'left', on = ['label'])
-                    data2['is_true_label'].fillna(0, inplace = True)
-
-                st.markdown("### Data Table")
-                with st.spinner('Generating a table of results and a download link...'):
-                    st.dataframe(data2)
-
-                    @st.cache
-                    def convert_df(df):
-                        # IMPORTANT: Cache the conversion to prevent computation on every rerun
-                        return df.to_csv().encode('utf-8')
-                    csv = convert_df(data2)
-                    st.download_button(
-                        label="Download data as CSV",
-                        data=csv,
-                        file_name='text_labels.csv',
-                        mime='text/csv',
-                    )
-                    # coded_data = base64.b64encode(data2.to_csv(index = False). encode ()).decode()
-                    # st.markdown(
-                    #     f'<a href="data:file/csv;base64, {coded_data}" download = "data.csv">Click here to download the data</a>',
-                    #     unsafe_allow_html = True
-                    # )
-
-                if len(glabels) > 0:
-                    st.markdown("### Evaluation Metrics")
-                    with st.spinner('Evaluating output against ground truth...'):
-
-                        section_header_description = ['Summary Label Performance', 'Original Full Text Label Performance']
-                        data_headers = ['scores_from_summary', 'scores_from_full_text']
-                        for i in range(0,2):
-                            st.markdown(f"###### {section_header_description[i]}")
-                            report = classification_report(y_true = data2[['is_true_label']],
-                                                           y_pred = (data2[[data_headers[i]]] >= threshold_value) * 1.0,
-                                                           output_dict=True)
-                            df_report = pd.DataFrame(report).transpose()
-                            st.markdown(f"Threshold set for: {threshold_value}")
-                            st.dataframe(df_report)
+                    join_list = ['label']
+                elif uploaded_onetext_glabels_file is not None:
+                    gdata = pd.read_csv(uploaded_onetext_glabels_file, header=None)
+                    join_list = ['label']
+                    gdata.columns = join_list
+                elif uploaded_multitext_glabels_file is not None:
+                    gdata = pd.read_csv(uploaded_multitext_glabels_file)
+                    join_list = ['title', 'label']
+                    gdata.columns = join_list
+
+                if len(glabels) > 0 or uploaded_onetext_glabels_file is not None or uploaded_multitext_glabels_file is not None:
+                    gdata['correct_match'] = True
+                    label_match_df = pd.merge(label_match_df, gdata, how='left', on=join_list)
+                    label_match_df['correct_match'].fillna(False, inplace=True)
+
+                st.dataframe(label_match_df) #.sort_values(['title', 'label'], ascending=[False, False]))
+                st.download_button(
+                    label="Download data as CSV",
+                    data=label_match_df.to_csv().encode('utf-8'),
+                    file_name='title_label_sum_full.csv',
+                    mime='title_label_sum_full/csv',
+                )
+
+                # if len(glabels) > 0:
+                #     st.markdown("### Evaluation Metrics")
+                #     with st.spinner('Evaluating output against ground truth...'):
+                #
+                #         section_header_description = ['Summary Label Performance', 'Original Full Text Label Performance']
+                #         data_headers = ['scores_from_summary', 'scores_from_full_text']
+                #         for i in range(0,2):
+                #             st.markdown(f"###### {section_header_description[i]}")
+                #             report = classification_report(y_true = data2[['is_true_label']],
+                #                                            y_pred = (data2[[data_headers[i]]] >= threshold_value) * 1.0,
+                #                                            output_dict=True)
+                #             df_report = pd.DataFrame(report).transpose()
+                #             st.markdown(f"Threshold set for: {threshold_value}")
+                #             st.dataframe(df_report)
 
     st.success('All done!')
-    st.balloons()
+    st.balloons()
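Two of the new patterns in this diff can be traced outside of Streamlit. First, the keyword step now collapses each title's (keyword, score) pairs into a dict, builds a wide frame with one column per title, and melts it into long form before applying the 0.25 score cutoff. A minimal, self-contained sketch of that reshaping (the titles and scores below are made up; in the app they come from md.keyword_gen):

    import pandas as pd

    # Made-up stand-in for md.keyword_gen output: {keyword: score} per title
    kw_dict = {
        'Frankenstein, Chapter 3': {'science': 0.61, 'university': 0.48},
        'Submitted Text': {'science': 0.33, 'travel': 0.21},
    }

    # Wide frame: one column per title, one row per keyword (NaN where absent)
    kw_df0 = pd.DataFrame.from_dict(kw_dict).reset_index()
    kw_df0.rename(columns={'index': 'keyword'}, inplace=True)

    # Long frame: one (keyword, title, score) row per match
    kw_df = pd.melt(kw_df0, id_vars=['keyword'], var_name='title', value_name='score').dropna()

    # Same 0.25 confidence cutoff as the app; 'travel' drops out here
    print(kw_df[kw_df['score'] > 0.25].sort_values('score', ascending=False))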
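Second, ground-truth evaluation is now a left join that flags each predicted label with a boolean correct_match column (replacing the old is_true_label 0/1 merge). A runnable sketch of the same pattern on hypothetical data:

    import pandas as pd

    # Hypothetical classifier output in the app's long format
    label_match_df = pd.DataFrame({
        'title': ['Submitted Text'] * 3,
        'label': ['science', 'travel', 'horror'],
        'scores_from_full_text': [0.91, 0.12, 0.77],
    })

    # Hypothetical ground truth labels entered by the user
    gdata = pd.DataFrame({'label': ['science', 'horror']})
    gdata['correct_match'] = True

    # Left join keeps every prediction; labels without ground truth get NaN, then False
    label_match_df = pd.merge(label_match_df, gdata, how='left', on=['label'])
    label_match_df['correct_match'].fillna(False, inplace=True)
    print(label_match_df)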