import torch  # backend for the transformers pipeline (not used directly below)
import cv2
from PIL import Image
import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend so plotting works without a display (e.g. on a server)
import matplotlib.pyplot as plt
from transformers import pipeline
import gradio as gr
from sklearn.cluster import KMeans

# Emotion detection pipeline for text (loaded for any text bundled with assets;
# not wired into the image flow below)
emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=None,  # return scores for every emotion label instead of only the top one
)
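
# Illustrative sketch only (not called by the Gradio app): if an asset does come with
# text, e.g. a caption or a note, the pipeline above could score it like this.
def analyze_text_emotion(text):
    """Return (label, score) pairs for a short piece of text, highest score first."""
    scores = emotion_classifier(text)
    if scores and isinstance(scores[0], list):  # some pipeline configurations nest one level deeper
        scores = scores[0]
    return sorted(((s["label"], s["score"]) for s in scores), key=lambda p: p[1], reverse=True)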

# Function to analyze colors in an image
def analyze_colors(image):
    try:
        print(f"Image type: {type(image)}, Image mode: {image.mode}")  # Debugging line

        # Ensure the image is in RGB format
        if image.mode != "RGB":
            image = image.convert("RGB")

        # Resize the image for faster processing
        image = image.resize((150, 150))

        # Convert to numpy array
        img_array = np.array(image)

        # Check if the image has a valid shape
        if img_array.ndim != 3 or img_array.shape[2] != 3:
            raise ValueError("Invalid image array shape: Expected a 3D array with 3 channels.")

        # Reshape image to be a list of pixels
        pixels = img_array.reshape((-1, 3))

        print(f"Image shape: {img_array.shape}")  # Debugging line
        print(f"Number of pixels: {len(pixels)}")  # Debugging line

        if len(pixels) < 5:
            print("Image has too few pixels for analysis")  # Debugging line
            return None  # Signal failure the same way as the except branch below

        kmeans = KMeans(n_clusters=5, random_state=0, n_init=10)
        kmeans.fit(pixels)
        dominant_colors = kmeans.cluster_centers_

        # Plot the dominant colors for visualization (only useful when running locally)
        plt.figure(figsize=(8, 6))
        plt.imshow([dominant_colors.astype(int)])
        plt.axis('off')
        plt.show()
        plt.close()  # Free the figure so repeated requests don't leak matplotlib figures

        return dominant_colors

    except Exception as e:
        print(f"Error in analyze_colors: {e}")  # Capture the error
        return None
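
# Illustrative sketch only (not called by the app): the KMeans centroids are float RGB
# triples; rendering them as hex strings is an easy way to make them readable in text output.
def dominant_colors_to_hex(dominant_colors):
    """Render RGB centroids (float values in 0-255) as '#rrggbb' strings."""
    hex_colors = []
    for color in dominant_colors:
        r, g, b = (int(round(float(v))) for v in np.clip(color, 0, 255))
        hex_colors.append(f"#{r:02x}{g:02x}{b:02x}")
    return hex_colors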



# Function to detect emotions from colors (simplified emotion-color mapping)
def color_emotion_analysis(dominant_colors):
    try:
        emotions = []
        for color in dominant_colors:
            # Simple heuristic on mean brightness (0-255): dark -> sadness, bright -> happiness
            brightness = np.mean(color)
            if brightness < 85:
                emotions.append("Sadness")
            elif brightness > 170:
                emotions.append("Happiness")
            else:
                emotions.append("Neutral")
        return emotions
    except Exception as e:
        print(f"Error in color_emotion_analysis: {e}")
        return ["Error analyzing emotions"]

# Function to analyze patterns and shapes using OpenCV
def analyze_patterns(image):
    try:
        # Convert to grayscale for edge detection (force RGB first so RGBA/L inputs don't break cvtColor)
        gray_image = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray_image, 100, 200)

        # Calculate the number of edges (chaos metric)
        num_edges = np.sum(edges > 0)

        if num_edges > 10000:  # Arbitrary threshold for "chaos"
            return "Chaotic patterns - possibly distress"
        else:
            return "Orderly patterns - possibly calm"
    except Exception as e:
        print(f"Error in analyze_patterns: {e}")
        return "Error analyzing patterns"

# Main function to process image and analyze emotional expression
def analyze_emotion_from_image(image):
    try:
        if image is None:
            return "Please upload an image."

        # Ensure the input image is a PIL image
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)  # Convert to PIL Image if it's a NumPy array

        # Analyze colors
        dominant_colors = analyze_colors(image)
        if dominant_colors is None:
            return "Error analyzing colors"
        
        color_emotions = color_emotion_analysis(dominant_colors)

        # Analyze patterns
        pattern_analysis = analyze_patterns(image)

        return f"Color-based emotions: {color_emotions}\nPattern analysis: {pattern_analysis}"
    except Exception as e:
        return f"Error processing image: {str(e)}"


# Gradio interface to upload image files and perform analysis
iface = gr.Interface(
    fn=analyze_emotion_from_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(label="Emotion analysis"),
)

# Launch the interface
if __name__ == "__main__":
    iface.launch()