from transformers import pipeline

# Load the model (unitary/toxic-bert is a BERT model fine-tuned for multi-label
# toxicity detection; its labels are: toxic, severe_toxic, obscene, threat,
# insult, identity_hate)
classifier = pipeline("text-classification", model="unitary/toxic-bert")


def analyze_text(text):
    # top_k=None returns a score for every label instead of only the top one;
    # without it, the threat/insult checks below would almost never fire
    results = classifier(text, top_k=None)

    # Convert to a readable format
    final_result = {
        "bullying": any(res["label"] == "toxic" and res["score"] > 0.5 for res in results),
        "threat": any(res["label"] == "threat" and res["score"] > 0.5 for res in results),
        "scolding": any(res["label"] == "insult" and res["score"] > 0.5 for res in results),
        "abuse": any(res["label"] in ("toxic", "severe_toxic") and res["score"] > 0.6 for res in results),
        "detailed_scores": results,
    }

    # Make detected categories bold
    for key in ("bullying", "threat", "scolding", "abuse"):
        if final_result[key]:
            final_result[key] = f"**{key.upper()} DETECTED**"

    return final_result
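
# Minimal usage sketch (not part of the original snippet): the sample sentence
# below is made up for illustration, and the model weights are downloaded from
# the Hugging Face Hub on first run.
if __name__ == "__main__":
    sample = "You are worthless and everyone hates you."
    report = analyze_text(sample)
    for category, value in report.items():
        if category != "detailed_scores":
            print(f"{category}: {value}")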