import gradio as gr
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import load_model
import pickle
# Load the saved model
model = load_model('emotion_classifier_model.h5')
# Load the tokenizer (it must have been saved alongside the model during training)
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
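# Illustrative sketch (assumption): during training, the fitted Tokenizer would
# typically have been pickled like this so it can be reloaded here:
#   with open('tokenizer.pickle', 'wb') as handle:
#       pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)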
# Define parameters for padding
max_length = 200
padding_type = 'post'
trunc_type = 'post'
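# For example (illustrative, with a short maxlen for readability):
#   pad_sequences([[4, 7]], maxlen=5, padding='post', truncating='post') -> [[4 7 0 0 0]]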
# Define a function to predict emotions for one or more comments (one per line)
def predict_emotions(comments):
    # The Gradio Textbox passes a single string; split it into individual comments
    comment_list = [c.strip() for c in comments.split('\n') if c.strip()]
    if not comment_list:
        return []
    # Convert the input text to sequences of token ids
    sequences = tokenizer.texts_to_sequences(comment_list)
    padded_sequences = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
    # Predict emotion probabilities for each comment
    predictions = model.predict(padded_sequences)
    # Emotion labels (must match the order of the model's output classes)
    emotion_labels = ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity',
                      'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear',
                      'gratitude', 'grief', 'joy', 'love', 'nervousness', 'neutral', 'optimism', 'pride', 'realization',
                      'relief', 'remorse', 'sadness', 'surprise']
    # Generate human-readable predictions
    result = []
    for prediction in predictions:
        # Cast to plain floats so the output is JSON-serializable
        emotion_dict = {emotion: float(prob) for emotion, prob in zip(emotion_labels, prediction)}
        # Sort emotions by probability and keep the top 3
        top_emotions = sorted(emotion_dict.items(), key=lambda x: x[1], reverse=True)[:3]
        result.append(dict(top_emotions))
    return result
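# Example call (illustrative): predict_emotions("Thanks so much, this made my day!")
# returns a list with one dict per comment, mapping the top-3 emotion labels to their probabilities.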
# Create the Gradio interface
interface = gr.Interface(
    fn=predict_emotions,
    inputs=gr.Textbox(label="Input Comments", lines=2, placeholder="Enter one or more comments, one per line...", type="text"),
    outputs=gr.JSON(label="Predicted Emotions"),
    title="Reddit Emotion Classifier",
    description="Enter one or more comments (one per line) to predict the top emotion labels for each."
)
# Launch the app
interface.launch()
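# Note: when running locally, interface.launch(share=True) can be used to get a
# temporary public link (a standard Gradio option); the default launch() is fine otherwise.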