import streamlit as st
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizer
import pandas as pd


def model1():
    st.title("Emotion Analysis")

    # Path to a directory saved with save_pretrained() (or a Hub model ID);
    # replace with the actual location of your fine-tuned model.
    model_path = "mymodel"
    tokenizer = RobertaTokenizer.from_pretrained(model_path)
    model = RobertaForSequenceClassification.from_pretrained(model_path)

    # Set the model to evaluation mode
    model.eval()

    # Labels for your specific task -- replace with your actual label names
    labels = ['surprise', 'fun', 'anger', 'boredom', 'hate', 'neutral', 'worry',
              'enthusiasm', 'sadness', 'relief', 'empty', 'happiness', 'love']

    # Streamlit app
    user_input = st.text_area("Enter text for analysis:")

    if st.button("Analyze"):
        if user_input:
            # Tokenize and preprocess the input
            input_ids = tokenizer.encode(user_input, return_tensors="pt")

            # Make prediction
            with torch.no_grad():
                output = model(input_ids)

            # Get predicted probabilities (sigmoid gives a per-label score)
            probabilities = torch.sigmoid(output.logits)

            # Check that the lengths match before creating the DataFrame
            if len(labels) == len(probabilities[0]):
                # Display the probabilities as individual bars
                df = pd.DataFrame({
                    "Label": labels,
                    "Probability": probabilities[0].tolist()
                })
                st.bar_chart(df.set_index("Label"))

                # Display the emotion labels and scores, highest first
                st.subheader("Emotion Analysis Output:")
                for i, (label, score) in enumerate(
                        sorted(zip(labels, probabilities[0]), key=lambda x: x[1], reverse=True)):
                    st.write(f"{i + 1}. {label.capitalize()}: {score:.4f}")
            else:
                st.error("Error: The length of labels and probabilities does not match.")
        else:
            st.warning("Please enter text for analysis.")


if __name__ == "__main__":
    model1()
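
# Usage note (assumption: this script is saved as app.py in the Space):
# Streamlit apps are launched from the command line with
#   streamlit run app.py
# which starts a local server and opens the interface in the browser.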