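"""Streamlit sentiment analysis demo.

Lets the user enter a piece of text, pick a Hugging Face sequence-classification
checkpoint, and view positive/negative probabilities computed from a softmax over
the model's logits.

Requires: streamlit, transformers, torch.
Run locally (assuming this file is saved as app.py) with:
    streamlit run app.py
"""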
import streamlit as st
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

# Set up the device (GPU or CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Streamlit app
def main():
    st.title("Sentiment Analysis App")
    st.write("Enter a text and select a pretrained model to perform sentiment analysis.")

    text = st.text_area("Enter text", value="I am leaving my hometown for greener pastures.")

    model_options = {
        "distilbert-base-uncased-finetuned-sst-2-english": "DistilBERT (SST-2)",
        "distilbert-base-uncased": "DistilBERT Uncased",
        "roberta-base": "RoBERTa Base",
        "albert-base-v2": "ALBERT Base v2"
        # Can add more models here if desired
    }

    # Load the pretrained model and tokenizer for the selected checkpoint.
    # Note: only the SST-2 checkpoint above is fine-tuned for sentiment; the other
    # base checkpoints load an untrained classification head, so their predictions
    # are essentially random.
    # (Optionally move this loading into a function decorated with @st.cache_resource
    # so the model is not reloaded on every Streamlit rerun.)
    model_name = st.selectbox(
        "Select a pretrained model",
        list(model_options.keys()),
        format_func=lambda name: model_options[name],  # show the friendly label
    )
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model.to(device)  # move the model to the same device the inputs will be on
    model.eval()      # disable dropout for inference

    if st.button("Submit"):
        # Perform sentiment analysis
        inputs = tokenizer(text, padding=True, truncation=True, return_tensors="pt")
        inputs = inputs.to(device)
        with torch.no_grad():  # no gradients needed for inference
            outputs = model(**inputs)
        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=1).cpu().numpy()[0]
        sentiment_label = "Positive" if probabilities[1] > probabilities[0] else "Negative"
        st.write(f"Sentiment: {sentiment_label}")
        st.write(f"Positive probability: {probabilities[1]}")
        st.write(f"Negative probability: {probabilities[0]}")

if __name__ == "__main__":
    main()