import gradio as gr
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizer

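# Load the fine-tuned classifier weights from the current directory, plus the
# matching roberta-base tokenizer.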
model = RobertaForSequenceClassification.from_pretrained("./")
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
model.eval()  # inference mode: disable dropout for deterministic predictions


def detect_abusive_language(text):
    """Classify a single string and return a human-readable verdict."""
    # Tokenize, truncating or padding to at most 128 tokens.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    # Run the classifier without tracking gradients (inference only).
    with torch.no_grad():
        outputs = model(**inputs)
    # argmax over the two logits: label 1 = abusive, label 0 = not abusive.
    prediction = torch.argmax(outputs.logits, dim=1).item()
    return "Abusive language detected!" if prediction == 1 else "No abusive language detected."


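# Wire the classifier into a minimal Gradio UI: one text box in, one text out.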
interface = gr.Interface(
    fn=detect_abusive_language,
    inputs="text",
    outputs="text",
    title="Abusive Language Detection",
    description="Enter text to check if it contains abusive language.",
)

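# launch() serves the app on a local URL by default; passing share=True would
# also create a temporary public link for demos.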
if __name__ == "__main__":
    interface.launch()