# HISIA / app.py
# Hugging Face Space file header (preserved from web export):
#   author: ANON-STUDIOS-254 - "Create app.py" - commit 7f7464c (verified)
import json
from pathlib import Path

import gradio as gr
import numpy as np
import tensorflow as tf
# --- 1. Constants and Configuration for HISIA ---
# IMPORTANT: Replace these with the actual paths to your emotion model and class names file.
# MODEL_PATH: Keras-format model file expected in the repository root.
MODEL_PATH = "HISIA1.keras"
# LABELS_PATH: JSON array of class-name strings, index-aligned with the model's output vector.
LABELS_PATH = "h_class_names.json"
IMG_SIZE = (224, 224) # Adjust if your emotion model uses a different input size (e.g., 48x48)
# --- 2. Load Emotion Model and Class Names ---
# Both loads are best-effort: on failure we log and fall back to sentinel
# values (empty list / None) that classify_image() checks before predicting,
# so the UI still starts and can report the problem to the user.
try:
    with open(LABELS_PATH, 'r') as f:
        # e.g., ["angry", "disgust", "fear", "happy", "neutral", "sad", "surprise"]
        class_names = json.load(f)
    print("βœ… Emotion class names loaded successfully.")
except FileNotFoundError:
    print(f"❌ ERROR: Make sure your class names file '{LABELS_PATH}' is in the repository.")
    class_names = []

try:
    model = tf.keras.models.load_model(MODEL_PATH)
    print(f"βœ… Emotion model loaded successfully.")
except Exception as e:
    # Broad catch is deliberate here: any load failure (missing file, version
    # mismatch, corrupt weights) should degrade gracefully, not crash the app.
    print(f"❌ ERROR: Could not load model. Make sure '{MODEL_PATH}' is in the repository.")
    print(f"Error details: {e}")
    model = None
# --- 3. Prediction Function (Maintained from original) ---
def classify_image(image):
    """Predict the emotion expressed in an input image.

    Args:
        image: A PIL image (or None if the user has not uploaded one).

    Returns:
        A dict mapping each class name to a float confidence, suitable for
        gr.Label, or a single-key {"Error": ...} dict when prediction is
        impossible (no image, or model/labels failed to load at startup).
    """
    # Guard clauses: report the most actionable problem first.
    if image is None:
        return {"Error": "Please upload an image first."}
    if model is None or not class_names:
        return {"Error": "Model or class names not loaded. Check server logs."}

    img_array = np.array(image)
    # Convert to grayscale if the model expects it, common for emotion models:
    # img_gray = tf.image.rgb_to_grayscale(img_array)
    # img_resized = tf.image.resize(img_gray, IMG_SIZE)
    img_resized = tf.image.resize(img_array, IMG_SIZE)  # Keep as is if model takes RGB
    # Model expects a batch dimension: (1, H, W, C).
    img_batch = np.expand_dims(img_resized, axis=0)
    predictions = model.predict(img_batch)[0]
    # class_names is index-aligned with the model's output vector.
    return {name: float(score) for name, score in zip(class_names, predictions)}
# --- 4. CSS for Styling (Maintained with Neobrutalist & Mid-Century Modern elements) ---
# Injected into gr.Blocks(css=...) below. Covers the app background/font,
# the title card, form panels, animated buttons, and panel headers.
css = """
/* --- Color Palette & Font --- */
:root {
--color-background: #F4F1DE; /* Muted, warm off-white */
--color-primary: #E07A5F; /* Burnt Sienna / Terracotta */
--color-secondary: #3D405B; /* Deep Indigo */
--color-accent: #81B29A; /* Muted Teal/Green */
--color-text: #2c2c2c; /* Dark, almost-black for high contrast */
--color-border: #2c2c2c; /* Solid black for brutalist edges */
--font-main: 'Lexend Deca', 'Helvetica Neue', sans-serif;
}
/* --- Main App Styling --- */
gradio-app {
background-color: var(--color-background) !important;
font-family: var(--font-main) !important;
}
/* --- Title Section --- */
.title-container {
padding: 2rem;
margin: 1rem 1rem 2rem 1rem;
border-radius: 12px;
border: 2px solid var(--color-border);
box-shadow: 6px 6px 0px var(--color-secondary);
background: white;
}
.title-container h1 {
color: var(--color-secondary);
font-size: 3.5rem;
font-weight: 900;
text-align: center;
margin-bottom: 0.5rem;
}
.title-container p {
color: var(--color-text);
font-size: 1.1rem;
line-height: 1.5;
text-align: center;
max-width: 800px;
margin: 0 auto;
}
/* --- Component Styling (Inputs/Outputs) --- */
.gradio-container .gradio-row .gradio-column {
background: transparent !important;
border: none !important;
box-shadow: none !important;
}
.gradio-container .form {
border: 2px solid var(--color-border) !important;
border-radius: 12px !important;
box-shadow: 4px 4px 0px var(--color-border) !important;
padding: 1.5rem !important;
background: white !important;
}
/* --- Neobrutalist Animated Buttons --- */
.gradio-button {
font-family: var(--font-main) !important;
font-weight: 600 !important;
font-size: 1rem !important;
color: var(--color-text) !important;
background: var(--color-primary) !important;
border: 2px solid var(--color-border) !important;
border-radius: 8px !important;
box-shadow: 4px 4px 0px var(--color-border) !important;
transition: transform 0.15s ease-in-out, box-shadow 0.15s ease-in-out !important;
}
.gradio-button:hover {
transform: translate(-2px, -2px) !important;
box-shadow: 6px 6px 0px var(--color-border) !important;
}
.gradio-button:active {
transform: translate(2px, 2px) !important;
box-shadow: 0px 0px 0px var(--color-border) !important;
}
/* --- Specific Button Styling (Primary Action) --- */
.gradio-button.svelte-1l2i5n7.primary {
background: var(--color-accent) !important;
color: white !important;
}
/* --- Headers for Components --- */
.panel-header {
color: var(--color-secondary) !important;
font-size: 1.5rem !important;
font-weight: 700 !important;
text-align: center !important;
margin-bottom: 1rem !important;
}
"""
# --- 5. Gradio Interface Definition for HISIA ---
description = """
<b>HISIA</b> is the Swahili word for <i>'emotion'</i> or <i>'feeling'</i>.
Upload an image with a face, and this AI will analyze the visual cues to predict the dominant emotion being expressed.
"""

with gr.Blocks(css=css, theme=gr.themes.Base()) as demo:
    # Neobrutalist Title Header
    gr.HTML(f"""
<div class="title-container">
<h1>HISIA</h1>
<p>{description}</p>
</div>
""")

    # Main layout with two columns: input on the left, analysis on the right.
    with gr.Row():
        with gr.Column(scale=1):
            gr.HTML("<h2 class='panel-header'>Upload an Image</h2>")
            image_input = gr.Image(type="pil", label=None, show_label=False)
            with gr.Row():
                clear_btn = gr.ClearButton(
                    value="Clear",
                    components=[image_input]  # Output label is added after it is created below
                )
                submit_btn = gr.Button(
                    "Analyze Emotion",
                    variant="primary"  # This makes it the main action button
                )
        with gr.Column(scale=1):
            gr.HTML("<h2 class='panel-header'>Emotion Analysis</h2>")
            output_label = gr.Label(num_top_classes=3, label=None, show_label=False)

    # Link the output to the clear button (it could not be registered earlier
    # because it is created in the second column).
    clear_btn.add(components=[output_label])

    # Define the click action for the submit button
    submit_btn.click(
        fn=classify_image,
        inputs=image_input,
        outputs=output_label
    )

    # Add some example images for users to try.
    # IMPORTANT: You must upload these images to your HF Space for them to work.
    # Create a folder named 'examples' and place your images inside, then list
    # their paths here.
    example_paths = [
        # "examples/happy_person.jpg",
        # "examples/sad_person.jpg",
        # "examples/surprised_person.jpg"
    ]
    # Keep only examples that actually exist on disk; gr.Examples raises on an
    # empty list and cache_examples fails on missing files, so skip it entirely
    # when there is nothing valid to show.
    example_paths = [p for p in example_paths if Path(p).is_file()]
    if example_paths:
        gr.Examples(
            examples=example_paths,
            inputs=image_input,
            outputs=output_label,
            fn=classify_image,
            cache_examples=True  # Speeds up demo for users
        )

# --- 6. Launch the App ---
if __name__ == "__main__":
    demo.launch(debug=True)