from transformers import AutoModelForSequenceClassification, AutoTokenizer
import gradio as gr

# Model path pointing to the fine-tuned checkpoint on the Hugging Face Model Hub
model_path = 'Mbabazi/twitter-roberta-base-sentiment-latest'
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)
# Function to predict the sentiment of a given tweet
def predict_tweet(tweet):
    # Tokenize the input tweet with the model's tokenizer
    inputs = tokenizer(tweet, return_tensors="pt", padding=True, truncation=True, max_length=128)
    # Pass the tokenized input through the pre-trained sentiment analysis model
    outputs = model(**inputs)
    # Apply softmax to the logits to obtain probabilities for each sentiment class
    probs = outputs.logits.softmax(dim=-1)
    # Define the sentiment classes in the order used by the model
    sentiment_classes = ['Negative', 'Neutral', 'Positive']
    # Return a dictionary mapping each sentiment class to its probability
    return {sentiment_classes[i]: float(probs.squeeze()[i]) for i in range(len(sentiment_classes))}
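
# Illustrative usage (the tweet text below is a made-up example, not executed here):
#   predict_tweet("The new update works great!")
#   -> {'Negative': ..., 'Neutral': ..., 'Positive': ...}  # probabilities summing to ~1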
# Create a Gradio interface for the tweet sentiment prediction function
iface = gr.Interface(
    fn=predict_tweet,  # prediction function
    inputs="text",  # input type: plain text
    outputs="label",  # output type: label with class probabilities
    title="Tweet Sentiment Classifier",
    description="Enter a tweet to determine if the sentiment is negative, neutral, or positive."
)

iface.launch()
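# Note: when running outside Hugging Face Spaces, a temporary public URL can be
# requested with iface.launch(share=True) instead of the plain launch() above.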