|
from transformers import AutoModelForSequenceClassification |
|
from transformers import TFAutoModelForSequenceClassification |
|
from transformers import AutoTokenizer, AutoConfig |
|
import numpy as np |
|
import gradio as gr |
|
|
|
|
|
|
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer |
|
|
|
|
|
# Hugging Face Hub repo id of the fine-tuned RoBERTa sentiment model.
# (Plain string — the original used an f-string with no placeholders.)
model_path = 'Mbabazi/twitter-roberta-base-sentiment-latest'

# Load the tokenizer and PyTorch weights once at import time so every
# prediction reuses the same objects instead of re-downloading/re-loading.
tokenizer = AutoTokenizer.from_pretrained(model_path)

model = AutoModelForSequenceClassification.from_pretrained(model_path)
|
|
|
|
|
|
|
def predict_tweet(tweet):
    """Classify the sentiment of a tweet.

    Parameters
    ----------
    tweet : str
        Raw tweet text to classify.

    Returns
    -------
    dict[str, float]
        Mapping of sentiment label ('Negative', 'Neutral', 'Positive') to
        its predicted probability — the shape gradio's "label" output expects.
    """
    # Local import keeps the fix self-contained; torch is already a
    # transitive dependency of the PyTorch transformers model used here.
    import torch

    # Tokenize; truncate long tweets to a 128-token budget.
    inputs = tokenizer(tweet, return_tensors="pt", padding=True, truncation=True, max_length=128)

    # Inference only — disable autograd so no gradient graph is built,
    # saving memory and time on every request.
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert raw logits to a probability distribution over the classes.
    probs = outputs.logits.softmax(dim=-1).squeeze()

    # NOTE(review): assumes the model emits exactly three logits in this
    # label order — true for twitter-roberta-base-sentiment-latest; confirm
    # against the checkpoint's id2label config if the model is swapped.
    sentiment_classes = ['Negative', 'Neutral', 'Positive']

    return {label: float(probs[i]) for i, label in enumerate(sentiment_classes)}
|
|
|
|
|
|
|
# Wire the classifier into a minimal Gradio UI: a single text box in,
# a label widget (per-class probabilities) out.
_interface_config = {
    "fn": predict_tweet,
    "inputs": "text",
    "outputs": "label",
    "title": "Tweet Sentiment Classifier",
    "description": "Enter a tweet to determine if the sentiment is negative, neutral, or positive.",
}

iface = gr.Interface(**_interface_config)

# Start the local Gradio server (blocks until shut down).
iface.launch()
|
|
|
|