Rob Caamano committed on
Commit 0dd6279
1 Parent(s): c21236a

Update app.py

Files changed (1)
  1. app.py +8 -16
app.py CHANGED
@@ -1,4 +1,5 @@
 import streamlit as st
+import pandas as pd
 from transformers import AutoTokenizer
 from transformers import (
     TFAutoModelForSequenceClassification as AutoModelForSequenceClassification,
@@ -31,13 +32,6 @@ with col1:
     st.subheader("Tweet")
     text = st.text_area("Input text", demo, height=275)
 
-with col2:
-    st.subheader("Classification")
-
-with col3:
-    st.subheader("Probability")
-
-
 input = tokenizer(text, return_tensors="tf")
 
 if submit:
@@ -45,17 +39,15 @@ if submit:
     classes = {k: results[k] for k in results.keys() if not k == "toxic"}
 
     max_class = max(classes, key=classes.get)
+    probability = classes[max_class]
 
-    with col2:
-        st.write(f"#### {max_class}")
+    result_df = pd.DataFrame({
+        'Classification': [max_class],
+        'Probability': [probability],
+        'Toxic': ['Yes' if results['toxic'] >= 0.5 else 'No']
+    })
 
-    with col3:
-        st.write(f"#### **{classes[max_class]:.2f}%**")
+    st.table(result_df)
 
-    if results["toxic"] < 0.5:
-        st.success("This tweet is unlikely to be be toxic!")
-    else:
-        st.warning('This tweet is likely to be toxic.')
-
     expander = st.expander("Raw output")
     expander.write(results)
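
For context, a minimal self-contained sketch of the table logic this commit introduces. The results dict below is a stand-in with made-up labels and scores; in app.py it comes from the model's per-label probabilities, which this diff does not show:

import pandas as pd
import streamlit as st

# Stand-in for the app's model output (assumed shape: label -> probability).
results = {"toxic": 0.12, "insult": 0.65, "obscene": 0.30}

# Drop the umbrella "toxic" score, then pick the highest-scoring fine-grained label.
classes = {k: results[k] for k in results.keys() if not k == "toxic"}
max_class = max(classes, key=classes.get)
probability = classes[max_class]

# One-row summary table replacing the old per-column writes and banners.
result_df = pd.DataFrame({
    'Classification': [max_class],
    'Probability': [probability],
    'Toxic': ['Yes' if results['toxic'] >= 0.5 else 'No']
})
st.table(result_df)

Folding the classification, its probability, and the toxicity verdict into a single st.table call is what lets the commit delete the col2/col3 columns and the st.success/st.warning branches.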