import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from typing import Dict, Any
# Load the model and tokenizer
model = AutoModelForSequenceClassification.from_pretrained('kietnt0603/bertweet-base-hate-speech-offensive')
tokenizer = AutoTokenizer.from_pretrained('kietnt0603/bertweet-base-hate-speech-offensive')
# Define the labels
labels = ["Hate", "Offensive", "Neither"]
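# Note: this label order is assumed to match the model's output indices
# (0 = Hate, 1 = Offensive, 2 = Neither); verify against model.config.id2label if unsure.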
# Function for prediction
def predict(inputs: str) -> Dict[str, Any]:
    # Tokenize input text
    inputs_dict = tokenizer(inputs, return_tensors="pt")
    # Forward pass (no gradients needed for inference)
    with torch.no_grad():
        outputs = model(**inputs_dict)
    # Softmax to get probabilities
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=1)
    # Get probabilities for each label
    label_probabilities = {label: round(prob, 4) for label, prob in zip(labels, probabilities[0].tolist())}
    # Return the result
    return label_probabilities
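# Illustrative usage (not part of the app flow): calling predict() directly returns a
# label-to-probability dict whose values sum to ~1.0. The input text below is arbitrary
# and the exact scores depend on the model weights.
#
#     predict("have a nice day")
#     # -> {"Hate": ..., "Offensive": ..., "Neither": ...}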
# Create title and description for the task
title = "Text Classification Demo"
description = "Classify text into categories: Hate, Offensive, Neither"
article = "Model loaded from https://huggingface.co/kietnt0603/bertweet-base-hate-speech-offensive"
# Create the Gradio interface
iface = gr.Interface(fn=predict,
                     inputs="textbox",
                     outputs=gr.Label(num_top_classes=3, label='Predictions'),
                     title=title,
                     description=description,
                     article=article)
# Launch the interface
iface.launch()
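# Note: launch() starts a local Gradio server; on Hugging Face Spaces the runtime handles
# hosting automatically, while a local run could pass share=True for a temporary public link.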