# Source: Hugging Face Space app.py (user nehapasricha94, revision 74fb5a4).
# NOTE(review): the original scrape included blob-viewer page text
# ("raw / history blame / 2.72 kB") above the code; preserved here as a
# comment so the module remains valid Python.
import torch
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from transformers import pipeline
import gradio as gr
from sklearn.cluster import KMeans
# Emotion detection pipeline for text (if any text is included in assets).
# NOTE(review): `return_all_scores=True` is deprecated in recent transformers
# releases in favor of `top_k=None` — confirm the installed version.
# NOTE(review): this classifier is never called anywhere in this file;
# verify whether it is still needed before removing.
emotion_classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True)
# Function to analyze colors in an image
def analyze_colors(image):
    """Return the 5 dominant colors of *image* as a (5, 3) float array.

    The image is downscaled to 150x150 for speed, flattened to a pixel
    list, and clustered with KMeans; the cluster centers (RGB, 0-255
    floats) are returned. A swatch of the colors is also rendered with
    matplotlib for visualization.

    Args:
        image: a PIL.Image (any mode; converted to RGB internally).

    Returns:
        numpy.ndarray of shape (5, 3) with the dominant RGB colors.
    """
    # Normalize mode and size so clustering cost is bounded.
    image = image.convert("RGB")
    image = image.resize((150, 150))
    # Flatten to a (num_pixels, 3) list of RGB triples.
    pixels = np.array(image).reshape((-1, 3))
    # Pin n_init and random_state: the defaults are nondeterministic and
    # emit FutureWarnings on modern scikit-learn.
    kmeans = KMeans(n_clusters=5, n_init=10, random_state=0)
    kmeans.fit(pixels)
    dominant_colors = kmeans.cluster_centers_
    # Plot the colors for visualization. Keep a handle and close the
    # figure afterwards — without plt.close() every request leaks a
    # figure in this long-running Gradio server (plt.show() is a no-op
    # in a headless backend).
    fig = plt.figure(figsize=(8, 6))
    plt.imshow([dominant_colors.astype(int)])
    plt.axis('off')
    plt.show()
    plt.close(fig)
    return dominant_colors
# Function to detect emotions from colors (simplified emotion-color mapping)
def color_emotion_analysis(dominant_colors):
    """Label each dominant color with a coarse emotion by brightness.

    Args:
        dominant_colors: iterable of RGB triples (array-like of numbers).

    Returns:
        list[str]: one of "Sadness", "Happiness", or "Neutral" per color.
    """
    def _label(rgb):
        # Mean channel value stands in for perceived brightness:
        # dark -> Sadness, bright -> Happiness, otherwise Neutral.
        brightness = np.mean(rgb)
        if brightness < 85:
            return "Sadness"
        if brightness > 170:
            return "Happiness"
        return "Neutral"

    return [_label(color) for color in dominant_colors]
# Function to analyze patterns and shapes using OpenCV
def analyze_patterns(image, chaos_threshold=10000):
    """Classify the image as "chaotic" or "orderly" from its edge density.

    Runs Canny edge detection on a grayscale copy and counts edge
    pixels; more edges than *chaos_threshold* is read as visual chaos.

    Args:
        image: RGB image as a PIL.Image or array convertible via np.array.
        chaos_threshold: edge-pixel count above which the image is
            considered chaotic. Default 10000 preserves the original
            (admittedly arbitrary) behavior; now tunable per caller.

    Returns:
        str: a short human-readable verdict.
    """
    # Convert to grayscale for edge detection.
    gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray_image, 100, 200)
    # Edge-pixel count is the "chaos" metric.
    num_edges = np.sum(edges > 0)
    if num_edges > chaos_threshold:
        return "Chaotic patterns - possibly distress"
    return "Orderly patterns - possibly calm"
# Main function to process image and analyze emotional expression
def analyze_emotion_from_image(image):
    """Run the full analysis pipeline on an uploaded image.

    Combines the color-based emotion labels with the edge-pattern
    verdict into a single human-readable report string.

    Args:
        image: the uploaded PIL.Image from Gradio.

    Returns:
        str: two-line summary of color emotions and pattern analysis.
    """
    color_emotions = color_emotion_analysis(analyze_colors(image))
    pattern_analysis = analyze_patterns(image)
    return (
        f"Color-based emotions: {color_emotions}\n"
        f"Pattern analysis: {pattern_analysis}"
    )
# Gradio interface to upload image files and perform analysis.
# "image" input delivers a PIL.Image to analyze_emotion_from_image;
# "text" output renders the returned report string.
iface = gr.Interface(fn=analyze_emotion_from_image, inputs="image", outputs="text")
# Launch the web server only when run as a script (not on import).
if __name__ == "__main__":
    iface.launch()