import streamlit as st
from transformers import (
    pipeline,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DistilBertForSequenceClassification,
    DistilBertTokenizerFast,
)
# Options for models from transformers library
MODEL_OPTS = ['finetuned', 'default', 'bertweet-base-sentiment-analysis', 'twitter-roberta-base-sentiment', 'distilRoberta-financial-sentiment']
FINETUNED_OPT = MODEL_OPTS[0]
DEFAULT_OPT = MODEL_OPTS[1]
# Returns the chosen model and tokenizer, or (None, None) when the default pipeline should be used
def load_model(opt):
    if opt not in MODEL_OPTS:
        print("Incorrect model selection. Try again!")
    model, tokenizer = None, None
    # Load the chosen sentiment analysis model from transformers
    if opt == FINETUNED_OPT:
        tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
        model = DistilBertForSequenceClassification.from_pretrained('saccharinedreams/finetuned-distilbert-base-uncased-for-hupd')
    elif opt == DEFAULT_OPT:
        # (None, None) makes sentiment_analysis fall back to the default pipeline
        return model, tokenizer
    elif opt == 'bertweet-base-sentiment-analysis':
        tokenizer = AutoTokenizer.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis")
        model = AutoModelForSequenceClassification.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis")
    elif opt == 'twitter-roberta-base-sentiment':
        tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
        model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
    elif opt == 'distilRoberta-financial-sentiment':
        tokenizer = AutoTokenizer.from_pretrained("mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")
        model = AutoModelForSequenceClassification.from_pretrained("mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")
    if model is None and tokenizer is None:
        print("Model not loaded correctly. Try again!")
    return model, tokenizer
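# Optional caching sketch (an assumption, not part of the original app): wrapping
# load_model with st.cache_resource (available in Streamlit >= 1.18) keeps the
# downloaded weights in memory across reruns, so editing the input text does not
# reload the selected model. Swap load_model_cached in for load_model below if desired.
@st.cache_resource
def load_model_cached(opt):
    return load_model(opt)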
def sentiment_analysis(model, tokenizer):
    if model and tokenizer:
        return pipeline('text-classification', model=model, tokenizer=tokenizer)
    # With no model specified, transformers falls back to its default text-classification model
    return pipeline('text-classification')
# Title the Streamlit app 'Sentiment Analysis'
st.title('Sentiment Analysis')
st.markdown('Link to the app - [sentiment-analysis-app](https://huggingface.co/spaces/saccharinedreams/sentiment-analysis-app)')
# Take in user input
user_text = st.text_input('Enter text to perform sentiment analysis on:', 'I love AI!')
# The user can interact with a dropdown menu to choose a sentiment analysis model.
dropdown_value = st.selectbox('Select one of the following sentiment analysis models', MODEL_OPTS, index=MODEL_OPTS.index(DEFAULT_OPT))
model, tokenizer = load_model(dropdown_value)
# Perform sentiment analysis on the user's input
result = sentiment_analysis(model, tokenizer)(user_text)
# Display the sentiment analysis results
st.write('Sentiment:', result[0]['label'], '; Score:', result[0]['score'])
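# Optional (an addition for illustration, not in the original app): the score is a
# probability in [0, 1], so it can also be rendered as a progress bar.
st.progress(float(result[0]['score']))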