ArslanXD committed
Commit c0916f2 · verified · 1 Parent(s): aef8055

Update app.py

Files changed (1)
  1. app.py +10 -3
app.py CHANGED
@@ -5,24 +5,31 @@ hate = pipeline("text-classification", model="hossam87/bert-base-arabic-hate-spe
 dialect = pipeline("text-classification", model="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", tokenizer="IbrahimAmin/marbertv2-arabic-written-dialect-classifier", return_all_scores=False)
 
 def analyze(text):
+    # Hate speech detection
     hate_res = hate(text)[0]
     hate_label = hate_res['label']
     hate_conf = hate_res['score']
 
+    # Dialect detection
     dial_res = dialect(text)[0]
     dial_label = dial_res['label']
     dial_conf = dial_res['score']
 
+    # Threat score
     weight = {"Neutral":0, "Offensive":0.5, "Sexism":1, "Racism":1, "Religious Discrimination":1}
     score = hate_conf * weight.get(hate_label, 0)
-    action = "✅ Safe Content — No harmful language detected. No moderation needed."
-    if score > 0.8:
-        action = "🚨 Immediate Review Required — This content contains severe hate speech or threats and should be escalated to moderators immediately."
+
+    # Recommended action (modified logic)
+    if hate_label != "Neutral":
+        action = "🚨 Immediate Review Required — This content contains severe hate speech or threats and should be escalated to moderators immediately."
     elif score >= 0.49:
         action = "⚠️ Potentially Harmful — The content may contain offensive or harmful language. Please review before taking further action."
+    else:
+        action = "✅ Safe Content — No harmful language detected. No moderation needed."
 
     return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action
 
+
 iface = gr.Interface(
     fn=analyze,
     inputs=gr.Textbox(lines=4, placeholder="اكتب هنا...", label="Arabic Text"),
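For reviewers, a minimal sketch of the revised action-selection branch in isolation. The `choose_action` helper and the sample classifier outputs below are hypothetical (not part of app.py); the two transformers pipelines are stubbed out so the snippet runs without downloading any models.

```python
# Hypothetical sketch (not part of app.py): the post-commit branch order,
# with the two Hugging Face pipelines stubbed out so it runs standalone.

def choose_action(hate_label, hate_conf):
    """Mirror the updated logic: any non-Neutral label escalates."""
    weight = {"Neutral": 0, "Offensive": 0.5, "Sexism": 1,
              "Racism": 1, "Religious Discrimination": 1}
    score = hate_conf * weight.get(hate_label, 0)
    if hate_label != "Neutral":
        action = "🚨 Immediate Review Required"
    elif score >= 0.49:  # now unreachable: a non-zero score implies non-Neutral
        action = "⚠️ Potentially Harmful"
    else:
        action = "✅ Safe Content"
    return score, action

# Hypothetical classifier outputs. Under the old logic, ("Offensive", 0.60)
# scored 0.30 and stayed "Safe"; after this commit it escalates immediately.
for label, conf in [("Neutral", 0.95), ("Offensive", 0.60), ("Racism", 0.85)]:
    score, action = choose_action(label, conf)
    print(f"{label} ({conf:.2f}) -> score={score:.2f}, {action}")
```

One consequence of the reordered branches worth noting: escalation no longer depends on the 0.8 score threshold, and the `elif score >= 0.49` arm can no longer fire, since a non-zero score already requires a non-Neutral label, which the first branch catches.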