Merge pull request #2 from pleonova/multiple-texts
app.py
CHANGED
@@ -18,49 +18,112 @@ ex_long_text = example_long_text_load()
-st.write("This app summarizes and then classifies your long text with multiple labels using [BART Large MNLI](https://huggingface.co/facebook/bart-large-mnli). The keywords are generated using [KeyBERT](https://github.com/MaartenGr/KeyBERT).")
-st.write("__Inputs__: User enters their own custom text and labels.")
-st.write("__Outputs__: A summary of the text, likelihood
-"Generate keywords from text?",
@@ -74,126 +137,230 @@ with st.spinner('Loading pretrained models...'):
-st.
-with st.spinner(f'Generating summaries for {len(
-df_report = pd.DataFrame(report).transpose()
-st.markdown(f"Threshold set for: {threshold_value}")
-st.dataframe(df_report)
-st.balloons()


# if __name__ == '__main__':
+###################################
+######## App Description ##########
+###################################
st.markdown("### Long Text Summarization & Multi-Label Classification")
+st.write("This app summarizes and then classifies your long text(s) with multiple labels using [BART Large MNLI](https://huggingface.co/facebook/bart-large-mnli). The keywords are generated using [KeyBERT](https://github.com/MaartenGr/KeyBERT).")
+st.write("__Inputs__: User enters their own custom text(s) and labels.")
+st.write("__Outputs__: A summary of the text, likelihood match score for each label and a downloadable csv of the results. \
Includes additional options to generate a list of keywords and/or evaluate results against a list of ground truth labels, if available.")

+
+
+###################################
+######## Example Input ##########
+###################################
example_button = st.button(label='See Example')
if example_button:
    example_text = ex_long_text #ex_text
    display_text = 'Excerpt from Frankenstein:' + example_text + '"\n\n' + "[This is an excerpt from Project Gutenberg's Frankenstein. " + ex_license + "]"
    input_labels = ex_labels
    input_glabels = ex_glabels
+    title_name = 'Frankenstein, Chapter 3'
else:
    display_text = ''
    input_labels = ''
    input_glabels = ''
+    title_name = 'Submitted Text'
+

with st.form(key='my_form'):
+    ###################################
+    ######## Form: Step 1 ##########
+    ###################################
+    st.markdown("##### Step 1: Upload Text")
    text_input = st.text_area("Input any text you want to summarize & classify here (keep in mind very long text will take a while to process):", display_text)
+
+    text_csv_expander = st.expander(label=f'Want to upload multiple texts at once? Expand to upload your text files below.', expanded=False)
+    with text_csv_expander:
+        st.markdown('##### Choose one of the options below:')
+        st.write("__Option A:__")
+        uploaded_text_files = st.file_uploader(label="Upload file(s) that end with the .txt suffix",
+                                               accept_multiple_files=True, key = 'text_uploader',
+                                               type='txt')
+        st.write("__Option B:__")
+        uploaded_csv_text_files = st.file_uploader(label='Upload a CSV file with two columns: "title" and "text"',
+                                                   accept_multiple_files=False, key = 'csv_text_uploader',
+                                                   type='csv')
+
+    if text_input == display_text and display_text != '':
+        text_input = example_text
+
    gen_keywords = st.radio(
+        "Generate keywords from text? (independent from the input labels below)",
        ('Yes', 'No')
    )

+    gen_summary = st.radio(
+        "Generate summary from text? (recommended for label matching below, but will take longer)",
+        ('Yes', 'No')
+    )

+    ###################################
+    ######## Form: Step 2 ##########
+    ###################################
+    st.write('\n')
+    st.markdown("##### Step 2: Enter Labels")
+    labels = st.text_input('Enter possible topic labels, which can be either keywords and/or general themes (comma-separated):',input_labels, max_chars=2000)
    labels = list(set([x.strip() for x in labels.strip().split(',') if len(x.strip()) > 0]))
+
+    labels_csv_expander = st.expander(label=f'Prefer to upload a list of labels instead? Click here to upload your CSV file.',expanded=False)
+    with labels_csv_expander:
+        uploaded_labels_file = st.file_uploader("Choose a CSV file with one column and no header, where each cell is a separate label",
+                                                key='labels_uploader')
+
+    ###################################
+    ######## Form: Step 3 ##########
+    ###################################
+    st.write('\n')
+    st.markdown("##### Step 3: Provide Ground Truth Labels (_Optional_)")
+    glabels = st.text_input('If available, enter ground truth topic labels to evaluate results, otherwise leave blank (comma-separated):',input_glabels, max_chars=2000)
    glabels = list(set([x.strip() for x in glabels.strip().split(',') if len(x.strip()) > 0]))

+
+    glabels_csv_expander = st.expander(label=f'Have a file with labels for the text? Click here to upload your CSV file.', expanded=False)
+    with glabels_csv_expander:
+        st.markdown('##### Choose one of the options below:')
+        st.write("__Option A:__")
+        uploaded_onetext_glabels_file = st.file_uploader("Single Text: Choose a CSV file with one column and no header, where each cell is a separate label",
+                                                         key = 'onetext_glabels_uploader')
+        st.write("__Option B:__")
+        uploaded_multitext_glabels_file = st.file_uploader('Multiple Text: Choose a CSV file with two columns "title" and "label", with the cells in the title column matching the name of the files uploaded in step #1.',
+                                                           key = 'multitext_glabels_uploader')
+
+
+    # threshold_value = st.slider(
+    #     'Select a threshold cutoff for matching percentage (used for ground truth label evaluation)',
+    #     0.0, 1.0, (0.5))

    submit_button = st.form_submit_button(label='Submit')

st.write("_For improvements/suggestions, please file an issue here: https://github.com/pleonova/multi-label-summary-text_")

+
+###################################
+####### Model Load Time #########
+###################################
with st.spinner('Loading pretrained models...'):
    start = time.time()
    summarizer = md.load_summary_model()

    kw_model = md.load_keyword_model()
    k_time = round(time.time() - start,4)

+    st.spinner(f'Time taken to load various models: {k_time}s for KeyBERT model & {s_time}s for BART summarizer mnli model & {c_time}s for BART classifier mnli model.')
+    # st.success(None)


if submit_button or example_button:
+    ###################################
+    ######## Load Text Data #######
+    ###################################
+    if len(text_input) == 0 and uploaded_text_files is None and uploaded_csv_text_files is None:
        st.error("Enter some text to generate a summary")
    else:
+
+        if len(text_input) != 0:
+            text_df = pd.DataFrame.from_dict({'title': [title_name], 'text': [text_input]})
+
+        # OPTION A
+        elif uploaded_text_files is not None:
+            st.markdown("### Text Inputs")
+            st.write('Files concatenated into a dataframe:')
+            file_names = []
+            raw_texts = []
+            for uploaded_file in uploaded_text_files:
+                text = str(uploaded_file.read(), "utf-8")
+                raw_texts.append(text)
+                title_file_name = uploaded_file.name.replace('.txt','')
+                file_names.append(title_file_name)
+            text_df = pd.DataFrame({'title': file_names,
+                                    'text': raw_texts})
+            st.dataframe(text_df.head())
+            st.download_button(
+                label="Download data as CSV",
+                data=text_df.to_csv().encode('utf-8'),
+                file_name='title_text.csv',
+                mime='text/csv',
+            )
+        # OPTION B
+        elif uploaded_csv_text_files is not None:
+            text_df = pd.read_csv(uploaded_csv_text_files)
+
+        # Which input was used? If text area was used, ignore the 'title'
+        if len(text_input) != 0:
+            title_element = []
+        else:
+            title_element = ['title']
+
+
+        ###################################
+        ######## Text Chunks ##########
+        ###################################
+        with st.spinner('Breaking up text into more reasonable chunks (transformers cannot exceed a 1024 token max)...'):
            # For each body of text, create text chunks of a certain token size required for the transformer
+
+            text_chunks_lib = dict()
+            for i in range(0, len(text_df)):
+                nested_sentences = md.create_nest_sentences(document=text_df['text'][i], token_max_length=1024)
+
+                # For each chunk of sentences (within the token max)
+                text_chunks = []
+                for n in range(0, len(nested_sentences)):
+                    tc = " ".join(map(str, nested_sentences[n]))
+                    text_chunks.append(tc)
+                title_entry = text_df['title'][i]
+                text_chunks_lib[title_entry] = text_chunks
+
+
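md.create_nest_sentences is defined elsewhere in the repo (imported as md) and is not shown in this diff; the sketch below is only an assumption of what such a helper does, packing whole sentences into chunks that stay under the transformer's 1024-token budget, with whitespace tokens used as a crude stand-in for the real tokenizer count.

# Hypothetical sketch -- not the md.create_nest_sentences used above.
import re

def create_nest_sentences_sketch(document: str, token_max_length: int = 1024):
    """Group sentences into nested lists so each group stays under a rough token budget."""
    nested, current, length = [], [], 0
    for sentence in re.split(r'(?<=[.!?])\s+', document.strip()):
        n_tokens = len(sentence.split())  # crude proxy for a real tokenizer count
        if current and length + n_tokens > token_max_length:
            nested.append(current)
            current, length = [], 0
        current.append(sentence)
        length += n_tokens
    if current:
        nested.append(current)
    return nested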
+        ################################
+        ######## Keywords ##########
+        ################################
+        if gen_keywords == 'Yes':
+            st.markdown("### Top Keywords")
+            with st.spinner("Generating keywords from text..."):
+
+                kw_dict = dict()
+                text_chunk_counter = 0
+                for key in text_chunks_lib:
+                    keywords_list = []
+                    for text_chunk in text_chunks_lib[key]:
+                        text_chunk_counter += 1
+                        keywords_list += md.keyword_gen(kw_model, text_chunk)
+                    kw_dict[key] = dict(keywords_list)
+                # Display as a dataframe
+                kw_df0 = pd.DataFrame.from_dict(kw_dict).reset_index()
+                kw_df0.rename(columns={'index': 'keyword'}, inplace=True)
+                kw_df = pd.melt(kw_df0, id_vars=['keyword'], var_name='title', value_name='score').dropna()
+
+                kw_column_list = ['keyword', 'score']
+                kw_df = kw_df[kw_df['score'] > 0.25][title_element + kw_column_list].sort_values(title_element + ['score'], ascending=False).reset_index().drop(columns='index')
+
+                st.dataframe(kw_df)
+                st.download_button(
+                    label="Download data as CSV",
+                    data=kw_df.to_csv().encode('utf-8'),
+                    file_name='title_keywords.csv',
+                    mime='text/csv',
+                )
+

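md.load_keyword_model and md.keyword_gen are assumed to wrap KeyBERT (the library the app description names); a minimal sketch under that assumption, returning the (keyword, score) pairs that dict(keywords_list) above expects.

# Hypothetical sketch -- the real md helpers may differ.
from keybert import KeyBERT

def load_keyword_model_sketch():
    return KeyBERT()

def keyword_gen_sketch(kw_model, sequence: str):
    # KeyBERT returns a list of (keyword, score) tuples, which dict() can consume.
    return kw_model.extract_keywords(sequence,
                                     keyphrase_ngram_range=(1, 1),
                                     stop_words='english',
                                     top_n=10)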
+        ###################################
+        ########## Summarize ##########
+        ###################################
+        if gen_summary == 'Yes':
            st.markdown("### Summary")
+            with st.spinner(f'Generating summaries for {len(text_df)} texts consisting of a total of {text_chunk_counter} chunks (this may take a minute)...'):
+                sum_dict = dict()
+                for i, key in enumerate(text_chunks_lib):
+                    with st.expander(label=f'({i+1}/{len(text_df)}) Expand to see intermediate summary generation details for: {key}', expanded=False):
+                        # for key in text_chunks_lib:
+                        summary = []
+                        for num_chunk, text_chunk in enumerate(text_chunks_lib[key]):
+                            chunk_summary = md.summarizer_gen(summarizer, sequence=text_chunk, maximum_tokens=300, minimum_tokens=20)
+                            summary.append(chunk_summary)
+
+                            st.markdown(f"###### Original Text Chunk {num_chunk+1}/{len(text_chunks)}" )
+                            st.markdown(text_chunk)
+                            st.markdown(f"###### Partial Summary {num_chunk+1}/{len(text_chunks)}")
+                            st.markdown(chunk_summary)
+
+                        # Combine all the summaries into a list and compress into one document, again
+                        final_summary = "\n\n".join(list(summary))
+                        sum_dict[key] = [final_summary]
+
+                sum_df = pd.DataFrame.from_dict(sum_dict).T.reset_index()
+                sum_df.columns = ['title', 'summary_text']
+                # TO DO: Make sure summary_text does not exceed the token length
+
+                st.dataframe(sum_df)
+                st.download_button(
+                    label="Download data as CSV",
+                    data=sum_df.to_csv().encode('utf-8'),
+                    file_name='title_summary.csv',
+                    mime='text/csv',
+                )
+
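md.load_summary_model and md.summarizer_gen are assumed to wrap a Hugging Face summarization pipeline; a minimal sketch under that assumption (the checkpoint name below is illustrative only and is not confirmed by this diff).

# Hypothetical sketch -- the real md helpers may differ.
from transformers import pipeline

def load_summary_model_sketch():
    return pipeline("summarization", model="facebook/bart-large-cnn")

def summarizer_gen_sketch(summarizer, sequence: str, maximum_tokens: int, minimum_tokens: int) -> str:
    # The pipeline returns a list of dicts with a 'summary_text' field.
    output = summarizer(sequence, max_length=maximum_tokens, min_length=minimum_tokens, do_sample=False)
    return output[0]['summary_text']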
+        ###################################
+        ########## Classifier #########
+        ###################################
+        if ((len(text_input) == 0 and uploaded_text_files is None and uploaded_csv_text_files is None)
+            or (len(labels) == 0 and uploaded_labels_file is None)):
            st.error('Enter some text and at least one possible topic to see label predictions.')
        else:
+            if gen_summary == 'Yes':
+                st.markdown("### Top Label Predictions on Summary vs Full Text")
+            else:
+                st.markdown("### Top Label Predictions on Full Text")
+
+            if uploaded_labels_file is not None:
+                labels_df = pd.read_csv(uploaded_labels_file, header=None)
+                label_list = labels_df.iloc[:, 0]
+            else:
+                label_list = labels
+
+            with st.spinner('Matching labels...(may take some time)'):
+                if gen_summary == 'Yes':
+                    labels_sum_col_list = ['title', 'label', 'scores_from_summary']
+                    labels_sum_df = pd.DataFrame(columns=labels_sum_col_list)
+
+                labels_full_col_list = ['title', 'label', 'scores_from_full_text']
+                labels_full_df = pd.DataFrame(columns=labels_full_col_list)
+
+                for i in range(0, len(text_df)):
+                    if gen_summary == 'Yes':
+                        s_topics, s_scores = md.classifier_zero(classifier, sequence=sum_df['summary_text'][i], labels=label_list, multi_class=True)
+                        ls_df = pd.DataFrame({'label': s_topics, 'scores_from_summary': s_scores})
+                        ls_df['title'] = text_df['title'][i]
+                        labels_sum_df = pd.concat([labels_sum_df, ls_df[labels_sum_col_list]])
+
+                    f_topics, f_scores = md.classifier_zero(classifier, sequence=text_df['text'][i], labels=label_list, multi_class=True)
+                    lf_df = pd.DataFrame({'label': f_topics, 'scores_from_full_text': f_scores})
+                    lf_df['title'] = text_df['title'][i]
+                    labels_full_df = pd.concat([labels_full_df, lf_df[labels_full_col_list]])
+
+                    with st.expander(f'({i+1}/{len(text_df)}) See intermediate label matching results for: {text_df["title"][i]}'):
+                        if gen_summary == 'Yes':
+                            st.dataframe(pd.merge(ls_df, lf_df, on=['title','label']))
+                        else:
+                            st.dataframe(lf_df)
+
+                if gen_summary == 'Yes':
+                    label_match_df = pd.merge(labels_sum_df, labels_full_df, on=['title', 'label'])
+                else:
+                    label_match_df = labels_full_df.copy()
+
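md.classifier_zero is assumed to wrap the transformers zero-shot-classification pipeline on facebook/bart-large-mnli (the model the app description names); a minimal sketch of that assumption, returning the parallel label/score lists consumed above.

# Hypothetical sketch -- the real md.classifier_zero may differ.
from transformers import pipeline

def load_classifier_sketch():
    return pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

def classifier_zero_sketch(classifier, sequence: str, labels, multi_class: bool = True):
    result = classifier(sequence, candidate_labels=list(labels), multi_label=multi_class)
    # 'labels' and 'scores' are parallel lists sorted by descending score.
    return result['labels'], result['scores']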
+                ###################################
+                ####### Ground Truth Labels ######
+                ###################################
                if len(glabels) > 0:
                    gdata = pd.DataFrame({'label': glabels})
+                    join_list = ['label']
+                elif uploaded_onetext_glabels_file is not None:
+                    gdata = pd.read_csv(uploaded_onetext_glabels_file, header=None)
+                    join_list = ['label']
+                    gdata.columns = join_list
+                elif uploaded_multitext_glabels_file is not None:
+                    gdata = pd.read_csv(uploaded_multitext_glabels_file)
+                    join_list = ['title', 'label']
+                    gdata.columns = join_list
+
+                if len(glabels) > 0 or uploaded_onetext_glabels_file is not None or uploaded_multitext_glabels_file is not None:
+                    gdata['correct_match'] = True
+                    label_match_df = pd.merge(label_match_df, gdata, how='left', on=join_list)
+                    label_match_df['correct_match'].fillna(False, inplace=True)
+
+                st.dataframe(label_match_df) #.sort_values(['title', 'label'], ascending=[False, False]))
+                st.download_button(
+                    label="Download data as CSV",
+                    data=label_match_df.to_csv().encode('utf-8'),
+                    file_name='title_label_sum_full.csv',
+                    mime='text/csv',
+                )
+
+            # if len(glabels) > 0:
+            #     st.markdown("### Evaluation Metrics")
+            #     with st.spinner('Evaluating output against ground truth...'):
+            #
+            #         section_header_description = ['Summary Label Performance', 'Original Full Text Label Performance']
+            #         data_headers = ['scores_from_summary', 'scores_from_full_text']
+            #         for i in range(0,2):
+            #             st.markdown(f"###### {section_header_description[i]}")
+            #             report = classification_report(y_true = data2[['is_true_label']],
+            #                                            y_pred = (data2[[data_headers[i]]] >= threshold_value) * 1.0,
+            #                                            output_dict=True)
+            #             df_report = pd.DataFrame(report).transpose()
+            #             st.markdown(f"Threshold set for: {threshold_value}")
+            #             st.dataframe(df_report)

st.success('All done!')
+st.balloons()