Update app.py
app.py
CHANGED
@@ -1,5 +1,83 @@
+# import gradio as gr
+# import os
+# import cv2
+# import numpy as np
+# import imutils
+# from keras.preprocessing.image import img_to_array
+# from keras.models import load_model
+
+# # Load the pre-trained models and define parameters
+# detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
+# emotion_model_path = 'model4_0.83/model4_entire_model.h5'
+# face_detection = cv2.CascadeClassifier(detection_model_path)
+# emotion_classifier = load_model(emotion_model_path, compile=False)
+# EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']
+
+
+# # face_detector_mtcnn = MTCNN()
+# classifier = load_model(emotion_model_path)
+
+# def predict_emotion(frame):
+#     frame = imutils.resize(frame, width=300)
+#     gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+#     faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
+#                                             minNeighbors=5, minSize=(30, 30),
+#                                             flags=cv2.CASCADE_SCALE_IMAGE)
+
+#     for (fX, fY, fW, fH) in faces:
+#         # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare
+#         # the ROI for classification via the CNN
+#         roi = gray[fY:fY + fH, fX:fX + fW]
+#         roi = cv2.resize(roi, (48, 48))
+#         roi = roi.astype("float") / 255.0
+#         roi = img_to_array(roi)
+#         roi = np.expand_dims(roi, axis=0)
+
+#         preds = emotion_classifier.predict(roi)[0]
+#         label = EMOTIONS[preds.argmax()]
+
+#         # Overlay a box over the detected face
+#         cv2.putText(frame, label, (fX, fY - 10),
+#                     cv2.FONT_HERSHEY_DUPLEX, 0.5, (238, 164, 64), 1, cv2.LINE_AA)
+#         cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH),
+#                       (238, 164, 64), 2)
+
+#     return frame
+
+
+
+
+
+# demo = gr.Interface(
+#     fn = predict_emotion,
+#     inputs = gr.Image(type="numpy"),
+#     outputs = gr.Image(),
+#     # gr.components.Image(label="Predicted Emotion"),
+#     # gr.components.Label(num_top_classes=2, label="Top 2 Probabilities")
+#     #flagging_options=["blurry", "incorrect", "other"],
+#     examples = [
+
+#         os.path.join(os.path.dirname(__file__), "images/chandler.jpeg"),
+#         os.path.join(os.path.dirname(__file__), "images/janice.jpeg"),
+#         os.path.join(os.path.dirname(__file__), "images/joey.jpeg"),
+#         os.path.join(os.path.dirname(__file__), "images/phoebe.jpeg"),
+#         os.path.join(os.path.dirname(__file__), "images/rachel_monica.jpeg"),
+#         os.path.join(os.path.dirname(__file__), "images/ross.jpeg"),
+#         os.path.join(os.path.dirname(__file__), "images/gunther.jpeg")
+
+#     ],
+#     title = "How are you feeling?",
+#     theme = "shivi/calm_seafoam"
+# )
+
+
+
+# if __name__ == "__main__":
+#     demo.launch()
+
+######################################################################################################################################################
+
 import gradio as gr
-import os
 import cv2
 import numpy as np
 import imutils
@@ -13,64 +91,44 @@ face_detection = cv2.CascadeClassifier(detection_model_path)
 emotion_classifier = load_model(emotion_model_path, compile=False)
 EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']
 
-
-# face_detector_mtcnn = MTCNN()
-classifier = load_model(emotion_model_path)
-
 def predict_emotion(frame):
     frame = imutils.resize(frame, width=300)
-    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
                                             minNeighbors=5, minSize=(30, 30),
                                             flags=cv2.CASCADE_SCALE_IMAGE)
 
     for (fX, fY, fW, fH) in faces:
-        # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare
-        # the ROI for classification via the CNN
         roi = gray[fY:fY + fH, fX:fX + fW]
         roi = cv2.resize(roi, (48, 48))
         roi = roi.astype("float") / 255.0
         roi = img_to_array(roi)
         roi = np.expand_dims(roi, axis=0)
-
         preds = emotion_classifier.predict(roi)[0]
         label = EMOTIONS[preds.argmax()]
-
-        # Overlay a box over the detected face
         cv2.putText(frame, label, (fX, fY - 10),
                     cv2.FONT_HERSHEY_DUPLEX, 0.5, (238, 164, 64), 1, cv2.LINE_AA)
         cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH),
                       (238, 164, 64), 2)
-
    return frame
 
+# Define Gradio interface
+inputs = [
+    gr.inputs.Image(type="numpy", label="Upload Image"),
+    gr.inputs.Video(type="numpy", label="Upload Video")  # Allow for video input
+]
+outputs = [
+    gr.outputs.Video(type="numpy", label="Processed Video"),  # Allow for video output
+    gr.Image()
+]
+examples = [
+    "images/chandler.jpeg",
+    "videos/input_video.mp4"
+]
 
+title = "Emotion Recognition"
+description = "Upload an image or a video, and the model will detect emotions in faces and overlay them on the output."
 
-
-
-demo = gr.Interface(
-    fn = predict_emotion,
-    inputs = gr.Image(type="numpy"),
-    outputs = gr.Image(),
-    # gr.components.Image(label="Predicted Emotion"),
-    # gr.components.Label(num_top_classes=2, label="Top 2 Probabilities")
-    #flagging_options=["blurry", "incorrect", "other"],
-    examples = [
-
-        os.path.join(os.path.dirname(__file__), "images/chandler.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/janice.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/joey.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/phoebe.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/rachel_monica.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/ross.jpeg"),
-        os.path.join(os.path.dirname(__file__), "images/gunther.jpeg")
-
-    ],
-    title = "How are you feeling?",
-    theme = "shivi/calm_seafoam"
-)
-
-
-
-if __name__ == "__main__":
-    demo.launch()
+iface = gr.Interface(fn=predict_emotion, inputs=inputs, outputs=outputs,
+                     examples=examples, title=title, description=description)
+iface.launch()
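Note on the new interface block: as committed, gr.Interface receives two input components and two output components, while predict_emotion takes a single frame and returns a single frame, so Gradio will complain about the argument count as soon as the app is invoked; the gr.inputs / gr.outputs namespaces are also deprecated and were removed in Gradio 4 in favour of gr.Image / gr.Video. A minimal sketch of one way the wiring could look, assuming Gradio 4.x and OpenCV and reusing predict_emotion from the diff above; the wrapper names (predict, annotate_video) and the processed_video.mp4 output path are hypothetical, not part of the commit:

import cv2
import gradio as gr

def annotate_video(video_path):
    # Read the uploaded video frame by frame, annotate each frame with
    # predict_emotion(), and write the result to a new file for Gradio to serve.
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # fall back if FPS metadata is missing
    writer = None
    out_path = "processed_video.mp4"  # hypothetical output location
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        annotated = predict_emotion(frame)  # same per-frame pipeline as for images
        if writer is None:
            h, w = annotated.shape[:2]
            writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"),
                                     fps, (w, h))
        writer.write(annotated)
    cap.release()
    if writer is not None:
        writer.release()
    return out_path

def predict(image, video):
    # One parameter per input component, one return value per output component;
    # whichever input was left empty simply yields an empty output.
    processed_video = annotate_video(video) if video else None
    processed_image = predict_emotion(image) if image is not None else None
    return processed_video, processed_image

iface = gr.Interface(
    fn=predict,
    inputs=[gr.Image(type="numpy", label="Upload Image"),
            gr.Video(label="Upload Video")],
    outputs=[gr.Video(label="Processed Video"),
             gr.Image(label="Processed Image")],
    title="Emotion Recognition",
    description="Upload an image or a video, and the model will detect emotions "
                "in faces and overlay them on the output.",
)

if __name__ == "__main__":
    iface.launch()

In recent Gradio versions a gr.Video input hands the function a file path by default, which is why annotate_video works on a path rather than an array. Gradio also delivers images as RGB arrays while cv2.VideoCapture yields BGR; this mainly affects the overlay colours, not the grayscale face detection itself.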