Dhrumit1314 committed
Commit e5d0183
Parent(s): 27682e8
Upload 2 files
- FER_Detection.py +162 -0
- emotion_detection_model_50epochs.h5 +3 -0
FER_Detection.py
ADDED
@@ -0,0 +1,162 @@
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 29 14:22:48 2024

@author: Dhrumit Patel
"""

"""
Dataset: FER-2013
https://www.kaggle.com/datasets/msambare/fer2013
"""

# ImageDataGenerator is imported from the standalone keras_preprocessing package here;
# on most installs the equivalent import is keras.preprocessing.image.
from keras_preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os
import random
import matplotlib.pyplot as plt
import numpy as np

IMG_HEIGHT = 48
IMG_WIDTH = 48
batch_size = 32

train_data_dir = 'data/train/'
validation_data_dir = 'data/test/'

# Augment the training images; the validation images are only rescaled.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=30,
                                   shear_range=0.3,
                                   zoom_range=0.3,
                                   horizontal_flip=True,
                                   fill_mode='nearest')

validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    color_mode='grayscale',
                                                    target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                    batch_size=batch_size,
                                                    class_mode='categorical',
                                                    shuffle=True)

validation_generator = validation_datagen.flow_from_directory(validation_data_dir,
                                                              color_mode='grayscale',
                                                              target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                              batch_size=batch_size,
                                                              class_mode='categorical',
                                                              shuffle=True)

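# A minimal sanity check (a sketch, not part of the original script): the hard-coded
# class_labels list used below assumes flow_from_directory assigned indices to the
# emotion folders in alphabetical order; the generator's own mapping confirms it.
print(train_generator.class_indices)
# For the Kaggle FER-2013 folder names (typically lowercase) this should look like:
# {'angry': 0, 'disgust': 1, 'fear': 2, 'happy': 3, 'neutral': 4, 'sad': 5, 'surprise': 6}
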
class_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

# Pull one augmented batch and display a random sample to verify the input pipeline.
img, label = next(train_generator)

i = random.randint(0, img.shape[0] - 1)
image = img[i]
labl = class_labels[label[i].argmax()]
plt.imshow(image[:, :, 0], cmap='gray')
plt.title(labl)
plt.show()

# Define the model
model = Sequential()

model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))

model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))

model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))

model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))

model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))

# Seven output classes, one per emotion label.
model.add(Dense(7, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

model.summary()

# plot_model requires pydot and graphviz to be installed.
from keras.utils import plot_model
plot_model(model, show_dtype=True, show_layer_names=True)

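# For reference, a rough walk-through of the feature-map shapes this stack should
# produce (default 'valid' padding, stride-1 convolutions, 2x2 pooling); model.summary()
# prints the authoritative version:
#   input          48 x 48 x 1
#   Conv2D(32)     46 x 46 x 32
#   Conv2D(64)     44 x 44 x 64   -> MaxPool -> 22 x 22 x 64
#   Conv2D(128)    20 x 20 x 128  -> MaxPool -> 10 x 10 x 128
#   Conv2D(256)     8 x  8 x 256  -> MaxPool ->  4 x  4 x 256
#   Flatten        4096 -> Dense(512) -> Dense(7, softmax)
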
# Count the images on disk so steps_per_epoch/validation_steps can be derived.
train_path = "data/train/"
test_path = "data/test"

num_train_imgs = 0
for root, dirs, files in os.walk(train_path):
    num_train_imgs += len(files)

num_test_imgs = 0
for root, dirs, files in os.walk(test_path):
    num_test_imgs += len(files)

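# Hedged cross-check (not in the original script): os.walk counts every file under each
# directory, while the generators report how many images Keras actually indexed via the
# .samples attribute; the two numbers should match if the folders contain only images.
print(num_train_imgs, train_generator.samples)
print(num_test_imgs, validation_generator.samples)
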
history = model.fit(train_generator,
                    steps_per_epoch=num_train_imgs // batch_size,
                    epochs=50,
                    validation_data=validation_generator,
                    validation_steps=num_test_imgs // batch_size)

model.save('models/emotion_detection_model_50epochs.h5')

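# Optional variant (a sketch, not part of the original script): the same fit call with
# checkpointing and early stopping. The checkpoint path and patience value are
# illustrative assumptions; left commented out so it does not retrain the model here.
# from keras.callbacks import ModelCheckpoint, EarlyStopping
# callbacks = [
#     ModelCheckpoint('models/emotion_detection_best.h5',
#                     monitor='val_loss', save_best_only=True),
#     EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True),
# ]
# history = model.fit(train_generator,
#                     steps_per_epoch=num_train_imgs // batch_size,
#                     epochs=50,
#                     validation_data=validation_generator,
#                     validation_steps=num_test_imgs // batch_size,
#                     callbacks=callbacks)
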
# Plot the training and validation loss at each epoch
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Plot the training and validation accuracy at each epoch
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
plt.plot(epochs, acc, 'y', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

from keras.models import load_model
my_model = load_model('models/emotion_detection_model_50epochs.h5', compile=False)

# Generate a batch of images from the validation set and predict on it
test_img, test_lbl = next(validation_generator)
predictions = my_model.predict(test_img)

predictions = np.argmax(predictions, axis=1)
test_labels = np.argmax(test_lbl, axis=1)

from sklearn.metrics import accuracy_score, confusion_matrix
print(f"Accuracy: {accuracy_score(y_true=test_labels, y_pred=predictions)}")

cm = confusion_matrix(y_true=test_labels, y_pred=predictions)
print(cm)

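# The accuracy above is measured on a single batch of 32 images. A minimal sketch (not
# in the original script) of the same check over one full pass of the validation set;
# reset() rewinds the generator so the loop covers each image once.
validation_generator.reset()
all_preds, all_labels = [], []
for _ in range(len(validation_generator)):
    batch_img, batch_lbl = next(validation_generator)
    batch_pred = my_model.predict(batch_img, verbose=0)
    all_preds.append(np.argmax(batch_pred, axis=1))
    all_labels.append(np.argmax(batch_lbl, axis=1))
all_preds = np.concatenate(all_preds)
all_labels = np.concatenate(all_labels)
print(f"Full validation accuracy: {accuracy_score(y_true=all_labels, y_pred=all_preds)}")
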
import seaborn as sns
sns.heatmap(cm, annot=True, fmt='d')
plt.show()

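# A hedged variant of the heatmap above (not in the original script): the same confusion
# matrix with the emotion names on the axes, plus a per-class precision/recall breakdown.
# classification_report is from scikit-learn; labels/zero_division are passed so the
# report still works if this single batch happens to miss a class.
from sklearn.metrics import classification_report
sns.heatmap(cm, annot=True, fmt='d', xticklabels=class_labels, yticklabels=class_labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
print(classification_report(y_true=test_labels, y_pred=predictions,
                            labels=list(range(len(class_labels))),
                            target_names=class_labels, zero_division=0))
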
class_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

# Show a random validation image with its true and predicted labels.
n = random.randint(0, test_img.shape[0] - 1)
image = test_img[n]
original_label = class_labels[test_labels[n]]
predicted_label = class_labels[predictions[n]]
plt.imshow(image[:, :, 0], cmap='gray')
plt.title(f"Original Label: {original_label} | Predicted Label: {predicted_label}")
plt.axis("off")
plt.show()
emotion_detection_model_50epochs.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dde478237261277246ed989749189f0eda75c8bfd9904928622ef59a8279f344
size 29933488