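"""Gradio app for binary sentiment analysis (Negative/Positive) with a fine-tuned BERT model."""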
import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
import torch.nn.functional as F
# Loading the fine-tuned BERT model and its tokenizer.
model = AutoModelForSequenceClassification.from_pretrained("BERTTuned")
tokenizer = AutoTokenizer.from_pretrained("Tokenizer")
def predict_sentiment(text):
    # Tokenizing the input text and preparing it for the model.
    inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors="pt")
    # Generating predictions from the model without tracking gradients.
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
    # Converting the model logits to probabilities for easier interpretation.
    probabilities = F.softmax(logits, dim=1).squeeze()
    # Mapping the model's output to a readable label.
    sentiment_mapping = {0: "Negative", 1: "Positive"}
    predicted_class_index = torch.argmax(probabilities).item()
    predicted_probability = probabilities[predicted_class_index].item()
    predicted_sentiment = sentiment_mapping[predicted_class_index]
    # Returning the predicted sentiment and its probability.
    return predicted_sentiment, f"{predicted_probability:.4f}"
# Setting up the Gradio interface.
iface = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
    outputs=[gr.Label(label="Predicted Sentiment"), gr.Textbox(label="Probability")],
    title="Sentiment Analysis",
    description="Enter text to predict its sentiment.",
    allow_flagging="never"
)
if __name__ == "__main__":
    iface.launch()
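# Running this file directly (python app.py) starts a local Gradio server for the interface.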