JulianHame committed · Commit 3880bae · 1 Parent(s): f64e64e

Collect input from user and display related toxicity information
app.py CHANGED
@@ -19,7 +19,49 @@ vectorizer = TextVectorization(max_tokens=MAX_FEATURES,
 
 vectorizer.adapt(X.values)
 
-
+st.title('Toxicity Classifier')
+
+'''
+st.header('Paste the link to a tweet here:')
+text = st.text_area('The toxicity of the tweet will be evaluated.',
+                    value = "https://twitter.com/lowtiiergod/status/1592437600216035328")
+'''
+
+st.header('Write a message here:')
+text = st.text_area('The toxicity of the message will be evaluated.',
+                    value = "I hate your guts and you should die alone.")
+
+input_str = vectorizer(text)
 res = model.predict(np.expand_dims(input_str,0))
 classification = res[0].tolist()
-
+
+toxicity = classification[0]
+toxicity_severe = classification[1]
+obscene = classification[2]
+threat = classification[3]
+insult = classification[4]
+identity_hate = classification[5]
+
+highest_class = "Severe toxicity"
+highest_class_rating = toxicity_severe
+if(obscene > highest_class_rating):
+    highest_class = "Obscenity"
+    highest_class_rating = obscene
+if(threat > highest_class_rating):
+    highest_class = "Threat"
+    highest_class_rating = threat
+if(insult > highest_class_rating):
+    highest_class = "Insult"
+    highest_class_rating = insult
+if(identity_hate > highest_class_rating):
+    highest_class = "Identity hate"
+    highest_class_rating = identity_hate
+
+st.write("---")
+st.write("Toxicity rating:")
+st.write(toxicity)
+st.write("---")
+st.write("Highest Toxicity Class:")
+st.write(highest_class)
+st.write("Rating:")
+st.write(highest_class_rating)
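
For readers following the new selection logic: the chained if statements added above amount to an argmax over the five class-specific scores, keeping the earlier label on ties because the comparisons are strict. A minimal standalone sketch of the same selection using Python's built-in max is shown below; it is not part of the commit, and the example classification values are made up for illustration.

# Hypothetical sketch, not from the commit: same argmax as the chained ifs.
# The commit's output layout is assumed:
# [toxicity, toxicity_severe, obscene, threat, insult, identity_hate]
classification = [0.91, 0.10, 0.45, 0.02, 0.78, 0.05]  # made-up example values

labels = ["Severe toxicity", "Obscenity", "Threat", "Insult", "Identity hate"]
scores = classification[1:]  # index 0 is the overall toxicity score

# key uses the score only, so ties keep the earlier label, matching the strict > chain
highest_class_rating, highest_class = max(zip(scores, labels), key=lambda pair: pair[0])

print(highest_class, highest_class_rating)  # Insult 0.78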