import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from typing import Dict, Any
# Load the model and tokenizer
model = AutoModelForSequenceClassification.from_pretrained('kietnt0603/bertweet-base-hate-speech-offensive')
tokenizer = AutoTokenizer.from_pretrained('kietnt0603/bertweet-base-hate-speech-offensive')
# Define the labels
labels = ["Hate", "Offensive", "Neither"]
# Function for prediction
def predict(inputs: str) -> Dict[str, Any]:
    # Tokenize the input text
    inputs_dict = tokenizer(inputs, return_tensors="pt")
    # Forward pass (no gradients needed for inference)
    with torch.no_grad():
        outputs = model(**inputs_dict)
    # Softmax over the logits to get class probabilities
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=1)
    # Map each label to its rounded probability
    label_probabilities = {label: round(prob.item(), 4) for label, prob in zip(labels, probabilities[0])}
    # Return the label-to-probability mapping
    return label_probabilities
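
# Illustrative usage sketch (not part of the app's control flow): calling predict
# directly returns a dict keyed by the labels above; the actual probability values
# depend on the downloaded model weights, so they are left as placeholders here.
#   predict("some example text")
#   -> {"Hate": ..., "Offensive": ..., "Neither": ...}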
# Create title and description for the task
title = "Text Classification Demo"
description = "Classify text into categories: Hate, Offensive, Neither"
article = "Model loaded from https://huggingface.co/kietnt0603/bertweet-base-hate-speech-offensive"
# Create the Gradio interface
iface = gr.Interface(fn=predict,
                     inputs="textbox",
                     outputs="label",
                     title=title,
                     description=description,
                     article=article)
# Launch the interface
iface.launch()