from transformers import pipeline

# Load the model (unitary/toxic-bert is a multi-label toxicity classifier;
# a model fine-tuned specifically for abuse detection can be substituted)
classifier = pipeline("text-classification", model="unitary/toxic-bert")

def analyze_text(text):
    # top_k=None asks the pipeline for a score per label (toxic, severe_toxic,
    # obscene, threat, insult, identity_hate) instead of only the top label,
    # which the category checks below rely on.
    results = classifier(text, top_k=None)
    # Some transformers versions nest the scores per input; unwrap if so.
    if results and isinstance(results[0], list):
        results = results[0]

    # Convert to a readable summary keyed by category
    final_result = {
        "bullying": any(res["label"] == "toxic" and res["score"] > 0.5 for res in results),
        "threat": any(res["label"] == "threat" and res["score"] > 0.5 for res in results),
        "scolding": any(res["label"] == "insult" and res["score"] > 0.5 for res in results),
        "abuse": any(res["label"] in ["toxic", "severe_toxic"] and res["score"] > 0.6 for res in results),
        "detailed_scores": results,
    }

    # Replace True flags with a bold, human-readable marker
    for key in ["bullying", "threat", "scolding", "abuse"]:
        if final_result[key]:
            final_result[key] = f"**{key.upper()} DETECTED**"

    return final_result
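
# A minimal usage sketch (the sample sentences below are illustrative
# assumptions, not part of the original module); the first call downloads
# the model weights from the Hugging Face Hub if they are not cached.
if __name__ == "__main__":
    for sample in ["Have a great day!", "I will hurt you if you come here again."]:
        print(sample)
        print(analyze_text(sample))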