Update app.py
app.py
CHANGED
@@ -15,11 +15,11 @@ def analyze(text):
 
     weight = {"Neutral":0, "Offensive":0.5, "Sexism":1, "Racism":1, "Religious Discrimination":1}
     score = hate_conf * weight.get(hate_label, 0)
-    action = "No
+    action = "✅ Safe Content – No harmful language detected. No moderation needed."
     if score > 0.8:
-        action =
+        action = "🚨 Immediate Review Required – This content contains severe hate speech or threats and should be escalated to moderators immediately."
     elif score >= 0.49:
-        action = "
+        action = "⚠️ Potentially Harmful – The content may contain offensive or harmful language. Please review before taking further action."
 
     return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action
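For context, a minimal runnable sketch of the moderation rule this commit changes. It assumes the classifier outputs (hate_label, hate_conf, dial_label, dial_conf) are passed in directly; in app.py they come from the hate-speech and dialect models upstream of this hunk, and the dialect label in the example call is made up.

# Standalone sketch of the updated scoring rule in app.py's analyze().
# The real function takes raw text and runs classifiers first; here the
# predicted labels and confidences are parameters so the rule runs alone.

def analyze(hate_label, hate_conf, dial_label, dial_conf):
    # Severity weight per predicted class; unknown labels default to 0.
    weight = {"Neutral": 0, "Offensive": 0.5, "Sexism": 1, "Racism": 1,
              "Religious Discrimination": 1}
    score = hate_conf * weight.get(hate_label, 0)

    # Start from the safe-content message, then escalate by score band:
    # score > 0.8 -> immediate review; 0.49 <= score <= 0.8 -> manual review.
    action = "✅ Safe Content – No harmful language detected. No moderation needed."
    if score > 0.8:
        action = ("🚨 Immediate Review Required – This content contains severe hate "
                  "speech or threats and should be escalated to moderators immediately.")
    elif score >= 0.49:
        action = ("⚠️ Potentially Harmful – The content may contain offensive or "
                  "harmful language. Please review before taking further action.")

    return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action

# Example: a confident "Racism" prediction (0.92 * 1 = 0.92) crosses the 0.8 band.
# "Levantine" is an illustrative dialect label, not one taken from the app.
print(analyze("Racism", 0.92, "Levantine", 0.75)[-1])

One consequence of these thresholds: an "Offensive" prediction carries weight 0.5, so it reaches the 0.49 review band only when the classifier is at least 98% confident, while any weight-1 class triggers review from 49% confidence upward.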