Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,37 @@
|
|
1 |
-
from detoxify import Detoxify
|
2 |
import streamlit as st
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
|
|
|
|
|
1 |
import streamlit as st
from detoxify import Detoxify


@st.cache_resource
def load_model():
    """Build and cache the Detoxify model.

    Streamlit re-runs this script top-to-bottom on every interaction;
    without caching, the (large) model would be re-loaded on each
    button press. `st.cache_resource` keeps one instance per session.
    """
    return Detoxify('original')


# Set Streamlit app title
st.title("Clean Chat Control")

# Create a text input box for user input
input_text = st.text_area('Enter a sentence')

# Check if there is input text
if input_text:
    # Add a button to trigger moderation
    if st.button("Moderate"):
        # Perform toxicity prediction (model is cached across reruns)
        with st.spinner("Analyzing..."):
            results = load_model().predict(input_text)

        # Display one line per toxicity score (values are floats in [0, 1])
        st.header("Moderation Results:")
        st.write("Toxicity: {:.2f}".format(results['toxicity']))
        st.write("Severe Toxicity: {:.2f}".format(results['severe_toxicity']))
        st.write("Obscene: {:.2f}".format(results['obscene']))
        st.write("Threat: {:.2f}".format(results['threat']))
        st.write("Insult: {:.2f}".format(results['insult']))
        st.write("Identity Attack: {:.2f}".format(results['identity_attack']))

# Add a brief description of the app
st.markdown("""
This simple app helps you analyze the content of a sentence for toxicity, threats, and insults.
Enter a sentence in the text box above and click the "Moderate" button to see the results.
""")

# Optionally, add a footer with additional information or credits
st.markdown("""
---
Created with ❤️ by Joas
""")