23A475R committed
Commit 5cfcc26
1 Parent(s): b5cd391

Update app.py

Files changed (1): app.py (+5 / -12)
app.py CHANGED
@@ -24,12 +24,7 @@ def predict_emotion(frame):
                                        minNeighbors=5, minSize=(30, 30),
                                        flags=cv2.CASCADE_SCALE_IMAGE)
 
-    frame_clone = frame.copy()
-    if len(faces) > 0:
-        faces = sorted(faces, reverse=True,
-                       key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
-        (fX, fY, fW, fH) = faces
-
+    for (fX, fY, fW, fH) in faces:
         # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare
         # the ROI for classification via the CNN
         roi = gray[fY:fY + fH, fX:fX + fW]
@@ -42,15 +37,13 @@ def predict_emotion(frame):
         label = EMOTIONS[preds.argmax()]
 
         # Overlay a box over the detected face
-        cv2.putText(frame_clone, label, (fX, fY - 10),
-                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (238, 164, 64), 1, cv2.LINE_AA)
-        cv2.rectangle(frame_clone, (fX, fY), (fX + fW, fY + fH),
+        cv2.putText(frame, label, (fX, fY - 10),
+                    cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
+        cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH),
                       (238, 164, 64), 2)
 
-    else:
-        label = "Can't find your face"
+    return frame
 
-    return frame_clone
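For reference, below is a minimal sketch of how predict_emotion reads after this commit. The collapsed hunk (old lines 31-41) is not shown above, so the cascade path, the 28x28 preprocessing details, and the classifier/emotions parameters are illustrative assumptions rather than the actual contents of app.py, where the model and label list appear to be module-level objects.

```python
# Sketch only, assuming a Haar-cascade face detector and a Keras-style CNN whose
# .predict() returns one probability per emotion label.
import cv2
import numpy as np

face_detector = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")


def predict_emotion(frame, classifier, emotions):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, scaleFactor=1.1,
                                           minNeighbors=5, minSize=(30, 30),
                                           flags=cv2.CASCADE_SCALE_IMAGE)

    # The commit replaces "keep only the largest face" with a loop over every
    # detection; if no face is found, the frame is returned unannotated.
    for (fX, fY, fW, fH) in faces:
        # Extract the face ROI, resize it to the 28x28 input mentioned in the
        # comment, and add batch/channel axes (normalization is an assumption).
        roi = gray[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (28, 28)).astype("float32") / 255.0
        roi = np.expand_dims(np.expand_dims(roi, axis=-1), axis=0)

        preds = classifier.predict(roi)[0]
        label = emotions[preds.argmax()]

        # Draw the predicted label and a bounding box directly on the input frame.
        cv2.putText(frame, label, (fX, fY - 10),
                    cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
        cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH),
                      (238, 164, 64), 2)

    return frame
```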