import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the base BERT classifier and attach the fine-tuned LoRA sentiment adapter.
base_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model = PeftModel.from_pretrained(base_model, "katsuchi/bert-base-uncased-twitter-sentiment-analysis")

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
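
# Optional (a hedged sketch, not part of the original app): merging the LoRA adapter
# into the base weights and moving to GPU can speed up inference. `merge_and_unload`
# is a PeftModel method; uncomment if desired.
# model = model.merge_and_unload()
# model.to("cuda" if torch.cuda.is_available() else "cpu")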

def get_sentiment(input_sentence):
    # Tokenize the input and move the tensors to the same device as the model.
    inputs = tokenizer(input_sentence, return_tensors="pt", padding=True, truncation=True, max_length=512)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    # Convert logits to class probabilities.
    probabilities = torch.nn.functional.softmax(logits, dim=-1).squeeze().cpu().numpy()

    labels = ["Negative", "Positive"]

    # Cast to plain Python floats so the result serializes cleanly to JSON.
    result = {labels[i]: round(float(prob), 3) for i, prob in enumerate(probabilities)}

    return result
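
# Quick sanity check (a hedged sketch, not part of the Gradio app): call the
# function directly to verify the pipeline before serving the UI.
# print(get_sentiment("I love this product!"))
# -> a dict mapping "Negative"/"Positive" to rounded probabilities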

# Example sentences
examples = [
    ["I love this product!"],
    ["This is the worst experience ever."],
    ["The movie was okay, not great but not bad."],
    ["Absolutely terrible, do not buy!"],
    ["I feel amazing today!"]
]

iface = gr.Interface(
    fn=get_sentiment,
    inputs=gr.Textbox(label="Enter a sentence for sentiment analysis"),
    outputs=gr.JSON(label="Sentiment Probabilities"),
    title="Sentiment Analysis with BERT",
    description="Enter a sentence, and this model will predict the sentiment (positive/negative) along with the probabilities.<br><br>Check out the source code on <a href='https://github.com/katsuchi23/Twitter-Sentiment-Analysis' target='_blank'>GitHub</a>!<br><br>Here are some example sentences to test:",
    examples=examples
)

iface.launch()
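
# Deployment notes (a hedged sketch; these are standard Gradio launch options,
# not settings from the original app):
# iface.launch(share=True)                                # temporary public link
# iface.launch(server_name="0.0.0.0", server_port=7860)   # bind to all interfaces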