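"""Gradio app that infers rough emotion and stress cues from an uploaded image.

The analysis combines dominant-color clustering (KMeans) with a simple
edge-density heuristic (Canny) and returns a plain-text summary.
"""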
import torch
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from transformers import pipeline
import gradio as gr
from sklearn.cluster import KMeans

# Emotion detection pipeline for text (loaded for any text assets; not used by
# the image analysis below)
emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    return_all_scores=True,
)

# Function to analyze colors in an image
def analyze_colors(image):
    print(f"Image type: {type(image)}, Image mode: {image.mode}")  # Debugging line
    try:
        # Ensure the image is in RGB format
        if image.mode != "RGB":
            image = image.convert("RGB")
        # Resize the image for faster processing
        image = image.resize((150, 150))
        # Convert to a NumPy array
        img_array = np.array(image)
        # Check that the image has a valid shape (H x W x 3)
        if img_array.ndim != 3 or img_array.shape[2] != 3:
            raise ValueError("Invalid image array shape: expected a 3D array with 3 channels.")
        # Reshape the image into a flat list of RGB pixels
        pixels = img_array.reshape((-1, 3))
        print(f"Image shape: {img_array.shape}")  # Debugging line
        print(f"Number of pixels: {len(pixels)}")  # Debugging line
        if len(pixels) < 5:
            print("Image has too few pixels for analysis")
            return None
        # Cluster the pixels to find the five dominant colors
        kmeans = KMeans(n_clusters=5, random_state=0)
        kmeans.fit(pixels)
        dominant_colors = kmeans.cluster_centers_
        # Plot the dominant colors for visualization (a no-op on headless servers)
        plt.figure(figsize=(8, 6))
        plt.imshow([dominant_colors.astype(int)])
        plt.axis('off')
        plt.show()
        plt.close()
        return dominant_colors
    except Exception as e:
        print(f"Error in analyze_colors: {e}")  # Capture the error
        return None
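# Quick standalone check of the color analysis (illustrative only; "sample.jpg"
# is a hypothetical local file, not part of this app):
#   colors = analyze_colors(Image.open("sample.jpg"))
#   print(colors)  # 5 x 3 array of RGB cluster centers, or None on failure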

# Function to detect emotions from colors (simplified emotion-color mapping)
def color_emotion_analysis(dominant_colors):
    try:
        emotions = []
        stress_levels = []
        for color in dominant_colors:
            brightness = np.mean(color)
            # Simple logic for emotion and stress based on brightness
            if brightness < 85:
                emotions.append("Sadness")
                stress_levels.append("High Stress")
            elif 85 <= brightness < 120:
                emotions.append("Neutral")
                stress_levels.append("Moderate Stress")
            elif 120 <= brightness < 170:
                emotions.append("Okay")
                stress_levels.append("Low Stress")
            elif 170 <= brightness < 200:
                emotions.append("Happiness")
                stress_levels.append("Very Low Stress")
            else:
                emotions.append("Very Happy")
                stress_levels.append("No Stress")
        return emotions, stress_levels
    except Exception as e:
        print(f"Error in color_emotion_analysis: {e}")
        return ["Error analyzing emotions"], ["Error analyzing stress levels"]

# Function to analyze patterns and shapes using OpenCV
def analyze_patterns(image):
    try:
        # Convert to grayscale for edge detection (force RGB first in case of RGBA input)
        gray_image = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray_image, 100, 200)
        # Count edge pixels as a rough "chaos" metric (scales with image resolution)
        num_edges = np.sum(edges > 0)
        if num_edges > 10000:  # Arbitrary threshold for "chaos"
            return "Chaotic patterns - possibly distress", "High Stress"
        else:
            return "Orderly patterns - possibly calm", "Low Stress"
    except Exception as e:
        print(f"Error in analyze_patterns: {e}")
        return "Error analyzing patterns", "Error analyzing stress levels"

# Main function to process the image and analyze its emotional expression
def analyze_emotion_from_image(image):
    try:
        # Ensure the input image is a PIL image
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)  # Convert to PIL Image if it's a NumPy array
        # Analyze colors
        dominant_colors = analyze_colors(image)
        if dominant_colors is None:
            return "Error analyzing colors"
        color_emotions, color_stress_levels = color_emotion_analysis(dominant_colors)
        # Analyze patterns
        pattern_analysis, pattern_stress_level = analyze_patterns(image)
        # Combine the stress estimates: use the pattern-based level only when the
        # color clusters indicated high stress, otherwise report moderate stress
        overall_stress_level = pattern_stress_level if "High Stress" in color_stress_levels else "Moderate Stress"
        return (f"Color-based emotions: {color_emotions}\n"
                f"Color-based stress levels: {color_stress_levels}\n"
                f"Pattern analysis: {pattern_analysis}\n"
                f"Overall stress level: {overall_stress_level}")
    except Exception as e:
        return f"Error processing image: {str(e)}"

# Gradio interface to upload image files and perform analysis
iface = gr.Interface(fn=analyze_emotion_from_image, inputs="image", outputs="text")

# Launch the interface
if __name__ == "__main__":
    iface.launch()
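# Tip: pass share=True to iface.launch() during local development to get a
# temporary public URL.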