ArslanXD committed
Commit f5fbb3f · verified · 1 Parent(s): fcd93cc

Update app.py

Files changed (1): app.py (+11, −21)
app.py CHANGED
@@ -15,36 +15,23 @@ def analyze(text):
 
     weight = {"Neutral":0, "Offensive":0.5, "Sexism":1, "Racism":1, "Religious Discrimination":1}
     score = hate_conf * weight.get(hate_label, 0)
-
-    keys_list = ["Offensive", "Sexism", "Racism", "Religious Discrimination"]
-
-    action = "✅ Safe Content: No significant signs of hate speech detected. No further action required."
-
-    if score > 0.8 or hate_label in keys_list:
-        action = "🚨 Critical Content Alert: This text is highly likely to contain severe hate speech. Immediate review and removal is strongly advised."
+    action = "No Action Needed"
+    if score > 0.8:
+        action = "Immediate Review"
     elif score >= 0.49:
-        action = "⚠️ Potentially Harmful Content: This text may contain offensive or discriminatory language. Please have a moderator review it."
-
-    # Convert all scores to percentages
-    return (
-        hate_label,
-        f"{hate_conf * 100:.2f}%",
-        dial_label,
-        f"{dial_conf * 100:.2f}%",
-        f"{score * 100:.2f}%",
-        action
-    )
+        action = "Flag for Moderator"
 
+    return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action
 
 iface = gr.Interface(
     fn=analyze,
     inputs=gr.Textbox(lines=4, placeholder="اكتب هنا...", label="Arabic Text"),
     outputs=[
         gr.Text(label="Hate Speech Label"),
-        gr.Text(label="Confidence (%)"),
+        gr.Text(label="Confidence"),
         gr.Text(label="Dialect"),
-        gr.Text(label="Confidence (%)"),
-        gr.Text(label="Threat Score (%)"),
+        gr.Text(label="Confidence"),
+        gr.Text(label="Threat Score"),
         gr.Text(label="Recommended Action")
     ],
     title="🛡️ Arabic Content Safety Analyzer",
@@ -53,3 +40,6 @@ iface = gr.Interface(
 )
 
 iface.launch()
+
+
+
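
For orientation, here is a minimal sketch of the analyze() flow as it stands after this commit. The diff begins at line 15 of app.py, so the imports and classifier setup below (hate_clf, dialect_clf and the placeholder model ids) are assumptions added purely for illustration; only the weighting, the thresholds, and the return value are taken from the diff itself.

# Minimal sketch, not the repository's actual header: the diff starts at line 15,
# so the imports and classifier setup here are assumed for illustration only.
import gradio as gr
from transformers import pipeline

hate_clf = pipeline("text-classification", model="your-hate-speech-model")  # hypothetical model id
dialect_clf = pipeline("text-classification", model="your-dialect-model")   # hypothetical model id

def analyze(text):
    hate = hate_clf(text)[0]      # e.g. {"label": "Racism", "score": 0.92}
    dial = dialect_clf(text)[0]   # e.g. {"label": "Gulf", "score": 0.81}
    hate_label, hate_conf = hate["label"], hate["score"]
    dial_label, dial_conf = dial["label"], dial["score"]

    # Threat score = classifier confidence scaled by the severity of the predicted class.
    weight = {"Neutral": 0, "Offensive": 0.5, "Sexism": 1, "Racism": 1, "Religious Discrimination": 1}
    score = hate_conf * weight.get(hate_label, 0)

    # Simplified action mapping introduced by this commit.
    action = "No Action Needed"
    if score > 0.8:
        action = "Immediate Review"
    elif score >= 0.49:
        action = "Flag for Moderator"

    return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action

With these thresholds, a Racism prediction at 0.92 confidence scores 0.92 × 1 = 0.92 and maps to "Immediate Review", while an Offensive prediction at the same confidence scores 0.92 × 0.5 = 0.46 and falls through to "No Action Needed". The previous version instead escalated any non-Neutral label to the critical alert regardless of confidence and reported all values as percentages.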