import gradio as gr
from transformers import pipeline
# Load the toxic-comment classifier from the Hugging Face Hub by its model identifier
classifier = pipeline('text-classification', model='prabhaskenche/toxic-comment-classification-using-RoBERTa')
def classify(text):
    # top_k=None returns a score for every label, not just the top prediction
    results = classifier(text, top_k=None)
    # Adjust the label names to match your model's label mapping
    non_toxic_score = next((item['score'] for item in results if item['label'] == 'LABEL_0'), 0.0)
    toxic_score = next((item['score'] for item in results if item['label'] == 'LABEL_1'), 0.0)
    return f"{non_toxic_score:.3f} non-toxic, {toxic_score:.3f} toxic"
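# Illustrative only (made-up scores): with top_k=None, a single string input is
# expected to come back as a flat list of per-label dicts, e.g.
#   classify('have a nice day')
#   results -> [{'label': 'LABEL_0', 'score': 0.993}, {'label': 'LABEL_1', 'score': 0.007}]
#   returns -> '0.993 non-toxic, 0.007 toxic'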
# Create the Gradio interface
interface = gr.Interface(
    fn=classify,
    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
    outputs="text",
)
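# Optional: gr.Interface also accepts title= and description= keyword arguments
# if you want a heading above the demo, e.g. title='Toxic Comment Classifier'.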
# Launch the interface
interface.launch()
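# On a hosted Space the plain launch() above is sufficient; when running locally,
# interface.launch(share=True) creates a temporary public link for quick sharing.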