
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load both models and their tokenizers once at startup.
# Model 1: binary IMDB sentiment classifier (2 labels).
tokenizer1 = AutoTokenizer.from_pretrained("textattack/bert-base-uncased-imdb")
model1 = AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-imdb")

# Model 2: multilingual 1-5 star sentiment model (5 labels, indices 0-4).
tokenizer2 = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
model2 = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")

# Define the sentiment prediction functions
def predict_sentiment(text):
    # Predict sentiment using model 1
    inputs1 = tokenizer1.encode_plus(text, padding="longest", truncation=True, return_tensors="pt")
    outputs1 = model1(**inputs1)
    predicted_label1 = outputs1.logits.argmax().item()
    sentiment1 = "Positive" if predicted_label1 == 1 else "Negative" if predicted_label1 == 0 else "Neutral"

    # Predict sentiment using model 2
    inputs2 = tokenizer2.encode_plus(text, padding="longest", truncation=True, return_tensors="pt")
    outputs2 = model2(**inputs2)
    predicted_label2 = outputs2.logits.argmax().item()
    sentiment2 = "Positive" if predicted_label2 == 1 else "Negative" if predicted_label2 == 0 else "Neutral"

    return sentiment1, sentiment2

# Build the Gradio UI: one text box in, one text output per model.
_title = "Sentiment Analysis (Model 1 vs Model 2)"
_description = "Compare sentiment predictions from two models."

iface = gr.Interface(
    fn=predict_sentiment,
    title=_title,
    description=_description,
    inputs="text",
    outputs=["text", "text"],
)

# Start the local web server for the demo.
iface.launch()