import streamlit as st
from transformers import pipeline
# Initialize the model and tokenizer once, to avoid reloading them on each user interaction
@st.cache(allow_output_mutation=True)
def load_model():
    classifier = pipeline("text-classification", model="tweetpie/toxic-content-classifier")
    return classifier
# Set up the title
st.title("Toxic Content Classifier Dashboard")
# Sidebar setup for configuration
st.sidebar.header("Configuration")
model_selection = st.sidebar.selectbox(
"Select a model",
options=['alm', 'blm'],
index=0 # Default selection
)
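# Note: the selected model name is not currently passed to load_model(), which always
# loads the tweetpie/toxic-content-classifier checkpoint defined above.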
# Sidebar inputs with headers for entities and aspects
st.sidebar.header("Entities")
pro_entities = st.sidebar.text_input("Pro Entities", help="Enter pro entities separated by commas")
anti_entities = st.sidebar.text_input("Anti Entities", help="Enter anti entities separated by commas")
neutral_entities = st.sidebar.text_input("Neutral Entities", help="Enter neutral entities separated by commas")
st.sidebar.header("Aspects")
pro_aspects = st.sidebar.text_input("Pro Aspects", help="Enter pro aspects separated by commas")
anti_aspects = st.sidebar.text_input("Anti Aspects", help="Enter anti aspects separated by commas")
neutral_aspects = st.sidebar.text_input("Neutral Aspects", help="Enter neutral aspects separated by commas")
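# Note: these entity/aspect fields collect user configuration but are not fed into the
# classifier call below, which uses a fixed placeholder sentence.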
generate_button = st.sidebar.button("Generate")
# Load the model
classifier = load_model()
# Process the input text and generate output
if generate_button:
    with st.spinner('Processing...'):
        # Call the model with a fixed placeholder input text
        input_text = "I love you"
        # The text-classification pipeline returns a list of dicts, e.g. [{'label': ..., 'score': ...}]
        model_output = classifier(input_text)

    # Display the input and the model's output
    st.write(f"Input Text: {input_text}")
    st.write("Model Output:", model_output)