import gradio as gr
from transformers import pipeline

# Load the RoBERTa-based toxic-comment classifier from the Hugging Face Hub.
classifier = pipeline(
    'text-classification',
    model='prabhaskenche/toxic-comment-classification-using-RoBERTa',
)


def classify(text):
    # top_k=None (recent transformers versions) returns a score for every label;
    # passing the input as a one-item list makes results[0] the list of
    # {label, score} dicts for this text. The default call would only return the
    # top label, which would break the lookups below.
    results = classifier([text], top_k=None)
    # LABEL_0 is treated as non-toxic and LABEL_1 as toxic.
    non_toxic_score = next((item['score'] for item in results[0] if item['label'] == 'LABEL_0'), 0)
    toxic_score = next((item['score'] for item in results[0] if item['label'] == 'LABEL_1'), 0)
    return f"{non_toxic_score:.3f} non-toxic, {toxic_score:.3f} toxic"


interface = gr.Interface(
    fn=classify,
    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
    outputs="text",
)
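
# launch() with defaults is sufficient on Hugging Face Spaces; when running
# locally, share=True can be passed to expose a temporary public link.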
interface.launch()