import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

# Load model and tokenizer
model_name = "cardiffnlp/twitter-roberta-base-sentiment-latest"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


def predict_sentiment(text):
    """Return class probabilities for Negative, Neutral, and Positive."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
    sentiments = ['Negative', 'Neutral', 'Positive']
    result = {sentiments[i]: float(predictions[0][i]) for i in range(len(sentiments))}
    return result


def custom_theme():
    """Define a custom theme for the Gradio app."""
    # gr.Theme() does not take color keywords directly; recent Gradio versions
    # expose colors as CSS variables via gr.themes.Base().set(...).
    return gr.themes.Base().set(
        button_primary_background_fill='#FF6347',  # primary color
        button_primary_text_color='#FFFFFF',       # text on primary
        body_background_fill='#F0F8FF',            # page background
        block_background_fill='#FAEBD7',           # card background
        body_text_color='#2F4F4F',                 # body text
    )


# Create Gradio interface
iface = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(lines=2, placeholder="Type your sentence here..."),
    outputs=gr.Label(num_top_classes=3),
    theme=custom_theme(),
    title="Sentiment Analysis",
    description="Analyze the sentiment of your text.",
    article=(
        "Enter a sentence to get its sentiment. The model categorizes "
        "sentiments into Negative, Neutral, and Positive."
    ),
)

if __name__ == "__main__":
    iface.launch()