Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -21,42 +21,34 @@ emotion_classifier = load_model(emotion_model_path, compile=False)
|
|
21 |
EMOTIONS = ['neutral','happiness','surprise','sadness','anger','disgust','fear','contempt','unknown']
|
22 |
|
23 |
|
24 |
-
def predict(
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
# the ROI for classification via the CNN
|
39 |
roi = gray[fY:fY + fH, fX:fX + fW]
|
40 |
roi = cv2.resize(roi, (48, 48))
|
41 |
roi = roi.astype("float") / 255.0
|
42 |
roi = img_to_array(roi)
|
43 |
roi = np.expand_dims(roi, axis=0)
|
44 |
-
|
45 |
preds = emotion_classifier.predict(roi)[0]
|
46 |
label = EMOTIONS[preds.argmax()]
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
|
53 |
-
cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
|
54 |
-
(238, 164, 64), 2)
|
55 |
|
56 |
-
for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
|
57 |
-
probs[emotion] = float(prob)
|
58 |
|
59 |
-
return frameClone, probs
|
60 |
|
61 |
# Define Gradio input and output components
|
62 |
image_input = gr.components.Image(type='numpy', label="Upload Image or Video")
|
|
|
21 |
# Class labels produced by the emotion classifier. Index order must match
# the model's output vector (presumably the FER+ label set — TODO confirm
# against the training pipeline for `emotion_model_path`).
EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger',
            'disgust', 'fear', 'contempt', 'unknown']
|
22 |
|
23 |
|
24 |
+
def predict(frame_or_path):
    """Detect one face in an image and classify its emotion.

    Parameters
    ----------
    frame_or_path : numpy.ndarray or str
        Either an already-decoded image array (e.g. a Gradio webcam/upload
        frame) or a filesystem path to an image readable by OpenCV.

    Returns
    -------
    tuple
        ``(annotated_frame, result)`` where ``result`` is a dict mapping
        every emotion label to its float probability on success, or a
        status/error string when the image cannot be read or no face is
        detected. ``annotated_frame`` is ``None`` only on a read failure.
    """
    # Gradio delivers images as numpy arrays; anything else is treated as
    # a path and decoded with OpenCV.
    if isinstance(frame_or_path, np.ndarray):
        frame = imutils.resize(frame_or_path, width=300)
    else:
        frame = cv2.imread(frame_or_path)
        if frame is None:
            return None, "Error: Unable to read image or video."

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
                                            minNeighbors=5, minSize=(30, 30),
                                            flags=cv2.CASCADE_SCALE_IMAGE)
    if len(faces) == 0:
        return frame, "No face detected."

    # NOTE(review): only the first detection is classified; when several
    # faces are present this choice is arbitrary — detectMultiScale does
    # not rank results by size or confidence.
    (fX, fY, fW, fH) = faces[0]

    # Crop the face ROI, scale it to the 48x48 grayscale input the CNN
    # expects, and normalise pixel values to [0, 1].
    roi = gray[fY:fY + fH, fX:fX + fW]
    roi = cv2.resize(roi, (48, 48))
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)  # add batch dimension for predict()

    preds = emotion_classifier.predict(roi)[0]
    label = EMOTIONS[preds.argmax()]

    # Annotate the frame with the winning label and a bounding box.
    cv2.putText(frame, label, (fX, fY - 10),
                cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
    cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH),
                  (238, 164, 64), 2)

    return frame, {emotion: float(prob) for emotion, prob in zip(EMOTIONS, preds)}
|
|
|
|
|
|
|
50 |
|
|
|
|
|
51 |
|
|
|
52 |
|
53 |
# Define Gradio input component: image arrives as a numpy array so that
# predict() takes its ndarray fast-path (no file decoding needed).
image_input = gr.components.Image(type='numpy', label="Upload Image or Video")
|