# Import streamlit and transformers
import streamlit as st
from transformers import pipeline
# Hugging Face model checkpoints available for classification
# huggingFaceModels = {
#     "roberta": "Iram7866/roberta_edos_labelled_aggregated",
#     "fBert": "Iram7866/fbert_edos_labelled_aggregated",
#     "hateBert": "Iram7866/hatebert_edos_labelled_aggregated",
#     "berTweet": "Iram7866/bertweet_edos_labelled_aggregated",
# }
huggingFaceModels = {
    "roberta": "Iram7866/roberta_edos_labelled_aggregated",
    "fBert": "Iram7866/fbert_edos_labelled_aggregated",
    "hateBert": "Iram7866/hatebert_edos_labelled_aggregated",
}
# Function to classify text
def classifyTextButton(text, model):
    # Build a text-classification pipeline for the selected checkpoint
    classifier = pipeline("text-classification", model=model)
    # Use the model to classify the input text
    results = classifier(text)
    return results
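# A possible optimisation (sketch only, not wired into the app below): with
# Streamlit >= 1.18, st.cache_resource can cache the loaded pipeline so the
# model is not rebuilt on every button press. classifyTextButton could call
# this hypothetical loadClassifier helper instead of constructing the
# pipeline itself.
@st.cache_resource
def loadClassifier(model):
    # Build the pipeline once per checkpoint and reuse it across reruns
    return pipeline("text-classification", model=model)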
###############################################################
# Streamlit app
# Title
st.title("Sexism Detection App")
# Subheader
st.subheader("By Iram Mahbub")
# Main text
st.write("Enter the text you want to classify:")
# Input box for text
textInputBox = st.text_input("Text Goes Here...")
# Model Radio Selection
# selectedModel = st.radio("Select a Model:", ["roberta", "fBert", "hateBert", "berTweet"])
selectedModel = st.radio("Select a Model:", ["roberta", "fBert", "hateBert"])
# Map model output labels to human-readable names
labels = {"LABEL_0": "Not Sexist", "LABEL_1": "Sexist"}
# Button to classify text
if st.button('Classify Text'):
    resultText = st.empty()
    classificationResultText = st.empty()
    # Display status text while the model runs
    statusText = st.text("Using the " + str(selectedModel) + " model to detect sexism...")
    # Classify the text
    classificationResult = classifyTextButton(textInputBox, huggingFaceModels[selectedModel])
    # Clear status text
    statusText.empty()
    # Display the result
    resultText.write("Result:")
    print(classificationResult)
    # Output the predicted label with the model's confidence score
    score = classificationResult[0]["score"] * 100
    if classificationResult[0]["label"] == "LABEL_1":
        classificationResultText.markdown(
            f"The text is <span style='color:red'>{labels['LABEL_1']}</span> with {score:.2f}% confidence",
            unsafe_allow_html=True,
        )
    else:
        classificationResultText.markdown(
            f"The text is <span style='color:green'>{labels['LABEL_0']}</span> with {score:.2f}% confidence",
            unsafe_allow_html=True,
        )