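"""Gradio demo: Arabic content safety analyzer.

Classifies Arabic text for hate speech, detects the written dialect,
derives a weighted threat score, and recommends a moderation action.
"""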
import gradio as gr
from transformers import pipeline

# Load both classifiers once at startup. The tokenizer defaults to the model's
# own, and return_all_scores=False is the (deprecated) default, so neither
# argument needs to be passed explicitly.
hate = pipeline("text-classification", model="hossam87/bert-base-arabic-hate-speech")
dialect = pipeline("text-classification", model="IbrahimAmin/marbertv2-arabic-written-dialect-classifier")
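# For a single input string, each pipeline returns a one-element list of
# {'label': str, 'score': float}, e.g. [{'label': 'Neutral', 'score': 0.97}],
# which is why the calls below index into [0].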

def analyze(text):
    # Hate speech detection
    hate_res = hate(text)[0]
    hate_label = hate_res['label']
    hate_conf = hate_res['score']

    # Dialect detection
    dial_res = dialect(text)[0]
    dial_label = dial_res['label']
    dial_conf = dial_res['score']

    # Threat score: classifier confidence scaled by the severity weight of
    # the predicted label (0 for Neutral, 0.5 for Offensive, 1 for the rest).
    weight = {"Neutral": 0, "Offensive": 0.5, "Sexism": 1, "Racism": 1,
              "Religious Discrimination": 1}
    score = hate_conf * weight.get(hate_label, 0)
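    # Worked example: an "Offensive" label at 0.90 confidence gives
    # score = 0.90 * 0.5 = 0.45; a "Neutral" label always scores 0.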

    # Recommended action: severe categories are escalated straight to
    # moderators; any other non-neutral label, or a high threat score,
    # is flagged for human review first.
    if hate_label in ("Sexism", "Racism", "Religious Discrimination"):
        action = ("🚨 Immediate Review Required: this content contains severe "
                  "hate speech or threats and should be escalated to moderators "
                  "immediately.")
    elif hate_label != "Neutral" or score >= 0.49:
        action = ("⚠️ Potentially Harmful: the content may contain offensive or "
                  "harmful language. Please review before taking further action.")
    else:
        action = ("✅ Safe Content: no harmful language detected. "
                  "No moderation needed.")

    return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action
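
# analyze() returns six strings matching the Gradio outputs below, e.g.
# ("Offensive", "0.90", "Egyptian", "0.75", "0.45", "⚠️ Potentially Harmful: ...").
# The dialect label shown here is hypothetical; actual labels depend on the model.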


iface = gr.Interface(
    fn=analyze,
    inputs=gr.Textbox(lines=4, placeholder="اكتب هنا...", label="Arabic Text"),  # placeholder: "Write here..."
    outputs=[
        gr.Text(label="Hate Speech Label"),
        gr.Text(label="Confidence"),
        gr.Text(label="Dialect"),
        gr.Text(label="Confidence"),
        gr.Text(label="Threat Score"),
        gr.Text(label="Recommended Action")
    ],
    title="🛡️ Arabic Content Safety Analyzer",
    description="Classifies Arabic text for hate speech, detects dialect, assigns threat severity score, and recommends action.",
    theme="default"
)
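
# launch() serves the app on a local Gradio server (http://127.0.0.1:7860 by
# default); passing share=True would also create a temporary public link.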

iface.launch()