SaviAnna committed on
Commit
de6ef4f
1 Parent(s): f22cd89

Update pages/🤢 You_are_toxic.py

Browse files
Files changed (1) hide show
  1. pages/🤢 You_are_toxic.py +13 -13
pages/🤢 You_are_toxic.py CHANGED
@@ -103,17 +103,17 @@ outputs = model_bert(**inputs)
103
  prediction_bert = torch.nn.functional.softmax(outputs.logits, dim=1)
104
  prediction_bert = torch.argmax(prediction_bert, dim=1).numpy()
105
  st.write("Comment by ML model:", user_review)
106
-
107
- if prediction == 0:
108
- st.markdown("<p style='color: green;'>Non-toxic comment</p>", unsafe_allow_html=True)
109
- else:
110
- st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)
111
- st.write("Comment by RuBERT:", user_review)
112
-
113
- if prediction_bert == 0:
114
- st.markdown("<p style='color: green;'>Controversial comment</p>", unsafe_allow_html=True)
115
- elif prediction_bert == 1:
116
- st.markdown("<p style='color: red;'>Non-toxic comment</p>", unsafe_allow_html=True)
117
- else:
118
- st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)
119
 
 
103
  prediction_bert = torch.nn.functional.softmax(outputs.logits, dim=1)
104
  prediction_bert = torch.argmax(prediction_bert, dim=1).numpy()
105
  st.write("Comment by ML model:", user_review)
106
+ if user_review.strip():
107
+ if prediction == 0:
108
+ st.markdown("<p style='color: green;'>Non-toxic comment</p>", unsafe_allow_html=True)
109
+ else:
110
+ st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)
111
+ st.write("Comment by RuBERT:", user_review)
112
+
113
+ if prediction_bert == 0:
114
+ st.markdown("<p style='color: green;'>Controversial comment</p>", unsafe_allow_html=True)
115
+ elif prediction_bert == 1:
116
+ st.markdown("<p style='color: red;'>Non-toxic comment</p>", unsafe_allow_html=True)
117
+ else:
118
+ st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)
119