import streamlit as st
from transformers import pipeline
import pandas as pd

# Let the user pick one of the two supported sentiment models.
option = st.selectbox(
    'Choose your model',
    ("cardiffnlp/twitter-roberta-base-sentiment-latest", "yiyanghkust/finbert-tone"))

# Toxicity categories the app reports on.
labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]

# Text area to get the input text from the user.
text = st.text_area("enter text")

# Three-column layout:
#   col1: the original tweet
#   col2: the binary toxicity class per label
#   col3: the per-label probability
col1, col2, col3 = st.columns(3)
# Display the prediction only when text is entered and a model is chosen.
if text and option:
    # Placeholder scores — the selected model is not wired in yet.
    # TODO(review): run `text` through the model named by `option`
    # (e.g. a transformers pipeline) and replace these dummy values
    # with real per-label probabilities.
    dd = {
        "labels": labels,
        "values": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
    }

    # Show which model was used for the analysis.
    st.write(f"Analyzed with {option} model")

    # Column 1: echo the original tweet.
    with col1:
        st.header("Original Tweet")
        st.write(text)

    # Column 2: binary toxicity class per label — 1 when the predicted
    # probability exceeds the threshold, 0 otherwise. E.g. toxic=1 means
    # the tweet is toxic; threat=0 means no threat was detected.
    THRESHOLD = 0.5
    with col2:
        st.header("Toxicity class")
        st.write({
            "labels": dd["labels"],
            "values": [1 if v > THRESHOLD else 0 for v in dd["values"]],
        })

    # Column 3: probability of each category, sorted in descending order.
    with col3:
        st.header("Probability")
        prob_table = pd.DataFrame(dd).sort_values("values", ascending=False)
        st.write(prob_table)