Update app.py
app.py
CHANGED
@@ -129,197 +129,206 @@ if submitted:
         'Others'
     ]
 
-
-
-
-
-
-
-    # predict
-    model_1 = load_model_1()
-    text_logits_1 = model_1(**tokenized_text_1).logits
-    predictions_1 = torch.softmax(text_logits_1, dim=1).tolist()[0]
-    predictions_1 = [round(a, 3) for a in predictions_1]
-
-    # dictionary with label as key and percentage as value
-    pred_dict_1 = (dict(zip(label_list_1, predictions_1)))
-
-    # sort 'pred_dict' by value and index the highest at [0]
-    sorted_preds_1 = sorted(pred_dict_1.items(), key=lambda x: x[1], reverse=True)
-
-    # Make dataframe for plotly bar chart
-    u_1, v_1 = zip(*sorted_preds_1)
-    x_1 = list(u_1)
-    y_1 = list(v_1)
-    df2 = pd.DataFrame()
-    df2['SubCatName'] = x_1
-    df2['Likelihood'] = y_1
-
-
-    # Second prediction
-
-    label_list_2 = ["False", "True"]
-
-    joined_clean_sents = prep_text(Text_entry)
-
-    # tokenize
-    tokenizer_2 = load_tokenizer_2()
-    tokenized_text_2 = tokenizer_2(joined_clean_sents, return_tensors="pt")
-
-    # predict
-    model_2 = load_model_2()
-    text_logits_2 = model_2(**tokenized_text_2).logits
-    predictions_2 = torch.softmax(text_logits_2, dim=1).tolist()[0]
-    predictions_2 = [round(a_, 3) for a_ in predictions_2]
-
-    # dictionary with label as key and percentage as value
-    pred_dict_2 = (dict(zip(label_list_2, predictions_2)))
-
-    # sort 'pred_dict' by value and index the highest at [0]
-    sorted_preds_2 = sorted(pred_dict_2.items(), key=lambda x: x[1], reverse=True)
-
-    # Make dataframe for plotly bar chart
-    u_2, v_2 = zip(*sorted_preds_2)
-    x_2 = list(u_2)
-    y_2 = list(v_2)
-    df3 = pd.DataFrame()
-    df3['ExtraOver'] = x_2
-    df3['Likelihood'] = y_2
-
-
-    # Third prediction
-
-    label_list_3 = ['0.04', '0.045', '0.05', '0.1', '0.15', '0.2', '1.0', '7.0', '166.67', 'Others']
-
-    joined_clean_sents = prep_text(Text_entry)
-
-    # tokenize
-    tokenizer_3 = load_tokenizer_3()
-    tokenized_text_3 = tokenizer_3(joined_clean_sents, return_tensors="pt")
-
-    # predict
-    model_3 = load_model_3()
-    text_logits_3 = model_3(**tokenized_text_3).logits
-    predictions_3 = torch.softmax(text_logits_3, dim=1).tolist()[0]
-    predictions_3 = [round(a_, 3) for a_ in predictions_3]
-
-    # dictionary with label as key and percentage as value
-    pred_dict_3 = (dict(zip(label_list_3, predictions_3)))
-
-    # sort 'pred_dict' by value and index the highest at [0]
-    sorted_preds_3 = sorted(pred_dict_3.items(), key=lambda x: x[1], reverse=True)
-
-    # Make dataframe for plotly bar chart
-    u_3, v_3 = zip(*sorted_preds_3)
-    x_3 = list(u_3)
-    y_3 = list(v_3)
-    df4 = pd.DataFrame()
-    df4['Conversion_factor'] = x_3
-    df4['Likelihood'] = y_3
-
-
-    st.empty()
-
-    tab1, tab2, tab3, tab4 = st.tabs(["Subcategory", "Extra Over", "Conversion Factor", "Summary"])
-
-    with tab1:
-        st.header("SubCatName")
-        # plot graph of predictions
-        fig = px.bar(df2, x="Likelihood", y="SubCatName", orientation="h")
-
-        fig.update_layout(
-            # barmode='stack',
-            template='ggplot2',
-            font=dict(
-                family="Arial",
-                size=14,
-                color="black"
-            ),
-            autosize=False,
-            width=900,
-            height=1000,
-            xaxis_title="Likelihood of SubCatName",
-            yaxis_title="SubCatNames",
-            # legend_title="Topics"
-        )
-    (removed lines 245-325 render only as blank lines and stray "#" / ")" characters in the diff view)
+    if Text_entry == "":
+        st.warning(
+            """This app needs text input to generate predictions. Kindly type or paste text into
+            the above **"Text Input"** box""",
+            icon="⚠️"
+        )
+
+    elif Text_entry != "":
+
+        joined_clean_sents = prep_text(Text_entry)
+
+        # tokenize
+        tokenizer_1 = load_tokenizer_1()
+        tokenized_text_1 = tokenizer_1(joined_clean_sents, return_tensors="pt")
+
+        # predict
+        model_1 = load_model_1()
+        text_logits_1 = model_1(**tokenized_text_1).logits
+        predictions_1 = torch.softmax(text_logits_1, dim=1).tolist()[0]
+        predictions_1 = [round(a, 3) for a in predictions_1]
+
+        # dictionary with label as key and percentage as value
+        pred_dict_1 = (dict(zip(label_list_1, predictions_1)))
+
+        # sort 'pred_dict' by value and index the highest at [0]
+        sorted_preds_1 = sorted(pred_dict_1.items(), key=lambda x: x[1], reverse=True)
+
+        # Make dataframe for plotly bar chart
+        u_1, v_1 = zip(*sorted_preds_1)
+        x_1 = list(u_1)
+        y_1 = list(v_1)
+        df2 = pd.DataFrame()
+        df2['SubCatName'] = x_1
+        df2['Likelihood'] = y_1
+
+
+        # Second prediction
+
+        label_list_2 = ["False", "True"]
+
+        joined_clean_sents = prep_text(Text_entry)
+
+        # tokenize
+        tokenizer_2 = load_tokenizer_2()
+        tokenized_text_2 = tokenizer_2(joined_clean_sents, return_tensors="pt")
+
+        # predict
+        model_2 = load_model_2()
+        text_logits_2 = model_2(**tokenized_text_2).logits
+        predictions_2 = torch.softmax(text_logits_2, dim=1).tolist()[0]
+        predictions_2 = [round(a_, 3) for a_ in predictions_2]
+
+        # dictionary with label as key and percentage as value
+        pred_dict_2 = (dict(zip(label_list_2, predictions_2)))
+
+        # sort 'pred_dict' by value and index the highest at [0]
+        sorted_preds_2 = sorted(pred_dict_2.items(), key=lambda x: x[1], reverse=True)
+
+        # Make dataframe for plotly bar chart
+        u_2, v_2 = zip(*sorted_preds_2)
+        x_2 = list(u_2)
+        y_2 = list(v_2)
+        df3 = pd.DataFrame()
+        df3['ExtraOver'] = x_2
+        df3['Likelihood'] = y_2
+
+
+        # Third prediction
+
+        label_list_3 = ['0.04', '0.045', '0.05', '0.1', '0.15', '0.2', '1.0', '7.0', '166.67', 'Others']
+
+        joined_clean_sents = prep_text(Text_entry)
+
+        # tokenize
+        tokenizer_3 = load_tokenizer_3()
+        tokenized_text_3 = tokenizer_3(joined_clean_sents, return_tensors="pt")
+
+        # predict
+        model_3 = load_model_3()
+        text_logits_3 = model_3(**tokenized_text_3).logits
+        predictions_3 = torch.softmax(text_logits_3, dim=1).tolist()[0]
+        predictions_3 = [round(a_, 3) for a_ in predictions_3]
+
+        # dictionary with label as key and percentage as value
+        pred_dict_3 = (dict(zip(label_list_3, predictions_3)))
+
+        # sort 'pred_dict' by value and index the highest at [0]
+        sorted_preds_3 = sorted(pred_dict_3.items(), key=lambda x: x[1], reverse=True)
+
+        # Make dataframe for plotly bar chart
+        u_3, v_3 = zip(*sorted_preds_3)
+        x_3 = list(u_3)
+        y_3 = list(v_3)
+        df4 = pd.DataFrame()
+        df4['Conversion_factor'] = x_3
+        df4['Likelihood'] = y_3
+
+
+        st.empty()
+
+        tab1, tab2, tab3, tab4 = st.tabs(["Subcategory", "Extra Over", "Conversion Factor", "Summary"])
+
+        with tab1:
+            st.header("SubCatName")
+            # plot graph of predictions
+            fig = px.bar(df2, x="Likelihood", y="SubCatName", orientation="h")
+
+            fig.update_layout(
+                # barmode='stack',
+                template='ggplot2',
+                font=dict(
+                    family="Arial",
+                    size=14,
+                    color="black"
+                ),
+                autosize=False,
+                width=900,
+                height=1000,
+                xaxis_title="Likelihood of SubCatName",
+                yaxis_title="SubCatNames",
+                # legend_title="Topics"
+            )
+
+            fig.update_xaxes(tickangle=0, tickfont=dict(family='Arial', color='black', size=14))
+            fig.update_yaxes(tickangle=0, tickfont=dict(family='Arial', color='black', size=14))
+            fig.update_annotations(font_size=14)  # this changes y_axis, x_axis and subplot title font sizes
+
+            # Plot
+            st.plotly_chart(fig, use_container_width=False)
+
+        with tab2:
+            st.header("ExtraOver")
+            # plot graph of predictions
+            fig = px.bar(df3, x="Likelihood", y="ExtraOver", orientation="h")
+
+            fig.update_layout(
+                # barmode='stack',
+                template='ggplot2',
+                font=dict(
+                    family="Arial",
+                    size=14,
+                    color="black"
+                ),
+                autosize=False,
+                width=500,
+                height=200,
+                xaxis_title="Likelihood of ExtraOver",
+                yaxis_title="ExtraOver",
+                # legend_title="Topics"
+            )
+
+            fig.update_xaxes(tickangle=0, tickfont=dict(family='Arial', color='black', size=14))
+            fig.update_yaxes(tickangle=0, tickfont=dict(family='Arial', color='black', size=14))
+            fig.update_annotations(font_size=14)  # this changes y_axis, x_axis and subplot title font sizes
+
+            # Plot
+            st.plotly_chart(fig, use_container_width=False)
+
+        with tab3:
+            st.header("Conversion_factor")
+            # plot graph of predictions
+            fig = px.bar(df4, x="Likelihood", y="Conversion_factor", orientation="h")
+
+            fig.update_layout(
+                # barmode='stack',
+                template='ggplot2',
+                font=dict(
+                    family="Arial",
+                    size=14,
+                    color="black"
+                ),
+                autosize=False,
+                width=500,
+                height=500,
+                xaxis_title="Likelihood of Conversion_factor",
+                yaxis_title="Conversion_factor",
+                # legend_title="Topics"
+            )
+
+            fig.update_xaxes(tickangle=0, tickfont=dict(family='Arial', color='black', size=14))
+            fig.update_yaxes(tickangle=0, tickfont=dict(family='Arial', color='black', size=14))
+            fig.update_annotations(font_size=14)  # this changes y_axis, x_axis and subplot title font sizes
+
+            # Plot
+            st.plotly_chart(fig, use_container_width=False)
+
+        with tab4:
+            # subcatNames
+            st.header("")
+            predicted_1 = st.metric("Predicted SubCatName", sorted_preds_1[0][0])
+            Prediction_confidence_1 = st.metric("Prediction confidence", (str(round(sorted_preds_1[0][1] * 100, 1)) + "%"))
+
+            #ExtraOver
+            st.header("")
+            predicted_2 = st.metric("Predicted ExtraOver", sorted_preds_2[0][0])
+            Prediction_confidence_2 = st.metric("Prediction confidence", (str(round(sorted_preds_2[0][1] * 100, 1)) + "%"))
+
+            # Conversion_factor
+            st.header("")
+            predicted_3 = st.metric("Predicted Conversion_factor", sorted_preds_3[0][0])
+            Prediction_confidence_3 = st.metric("Prediction confidence", (str(round(sorted_preds_3[0][1] * 100, 1)) + "%"))
+
+            st.success("Great! Predictions successfully completed. ", icon="✅")
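
A note on the repeated logic: the three prediction blocks added above run the same tokenize, model forward pass, softmax, sort, and DataFrame steps, differing only in which cached tokenizer/model pair and label list they use. Below is a minimal sketch (not part of this commit) of how that pipeline could be pulled into one helper; it assumes the app's existing prep_text, load_tokenizer_*, and load_model_* helpers behave as they do in app.py, and the column name is passed in by the caller.

    import pandas as pd
    import torch

    def predict_labels(text, tokenizer, model, label_list, value_column):
        """Run one classification head and return (sorted_preds, df).

        sorted_preds: (label, probability) pairs, highest probability first.
        df: DataFrame with columns [value_column, 'Likelihood'] for a horizontal bar chart.
        """
        tokenized = tokenizer(text, return_tensors="pt")
        with torch.no_grad():                      # inference only, no gradient tracking
            logits = model(**tokenized).logits
        probs = torch.softmax(logits, dim=1).tolist()[0]
        probs = [round(p, 3) for p in probs]

        # pair each label with its probability and sort by likelihood, highest first
        pred_dict = dict(zip(label_list, probs))
        sorted_preds = sorted(pred_dict.items(), key=lambda x: x[1], reverse=True)

        labels, likelihoods = zip(*sorted_preds)
        df = pd.DataFrame({value_column: list(labels), "Likelihood": list(likelihoods)})
        return sorted_preds, df

    # Hypothetical usage for the first head (names taken from the diff):
    # sorted_preds_1, df2 = predict_labels(prep_text(Text_entry), load_tokenizer_1(),
    #                                      load_model_1(), label_list_1, "SubCatName")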
|