23A475R committed on
Commit
6458128
1 Parent(s): fbd83eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -27
app.py CHANGED
@@ -21,42 +21,34 @@ emotion_classifier = load_model(emotion_model_path, compile=False)
21
  EMOTIONS = ['neutral','happiness','surprise','sadness','anger','disgust','fear','contempt','unknown']
22
 
23
 
24
def predict(frame):
    """Detect the largest face in an RGB frame and classify its emotion.

    Parameters
    ----------
    frame : np.ndarray
        RGB image (Gradio supplies RGB, not BGR).

    Returns
    -------
    tuple
        (annotated_frame, probs) where probs is a dict mapping each
        emotion name to its float probability, or
        (annotated_frame, "Can't find your face") when no face is found.
    """
    frame = imutils.resize(frame, width=300)
    # Gradio delivers RGB frames, hence RGB2GRAY (not BGR2GRAY).
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    faces = face_detection.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE,
    )

    frameClone = frame.copy()
    if len(faces) == 0:
        return frameClone, "Can't find your face"

    # Haar cascades return rects as (x, y, w, h); pick the largest face
    # by area. BUG FIX: the original key (x[2]-x[0])*(x[3]-x[1]) treated
    # the rect as (x1, y1, x2, y2) corners — the area of (x, y, w, h)
    # is simply w * h.
    (fX, fY, fW, fH) = max(faces, key=lambda r: r[2] * r[3])

    # Extract the face ROI from the grayscale image, resize it to the
    # 48x48 input the CNN expects, scale to [0, 1], and add a batch axis.
    roi = gray[fY:fY + fH, fX:fX + fW]
    roi = cv2.resize(roi, (48, 48))
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)

    preds = emotion_classifier.predict(roi)[0]
    label = EMOTIONS[preds.argmax()]

    # Annotate the frame with the winning label and the face bounding box.
    cv2.putText(frameClone, label, (fX, fY - 10),
                cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
    cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
                  (238, 164, 64), 2)

    probs = {emotion: float(prob) for emotion, prob in zip(EMOTIONS, preds)}
    return frameClone, probs
60
 
61
  # Define Gradio input and output components
62
  image_input = gr.components.Image(type='numpy', label="Upload Image or Video")
 
21
  EMOTIONS = ['neutral','happiness','surprise','sadness','anger','disgust','fear','contempt','unknown']
22
 
23
 
24
def predict(frame_or_path):
    """Classify the emotion of the largest face in an image.

    Parameters
    ----------
    frame_or_path : np.ndarray or str
        Either an RGB numpy frame (Gradio webcam/image input) or a
        filesystem path to an image readable by cv2.imread.

    Returns
    -------
    tuple
        (annotated_frame, result) where result is a dict mapping each
        emotion name to its float probability, or an error string when
        the image cannot be read / no face is detected.
    """
    if isinstance(frame_or_path, np.ndarray):  # webcam / uploaded frame
        frame = imutils.resize(frame_or_path, width=300)
        # Gradio delivers RGB frames, so convert from RGB (not BGR).
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    else:  # path on disk
        frame = cv2.imread(frame_or_path)  # BGR, or None on failure
        if frame is None:
            return None, "Error: Unable to read image or video."
        # Resize so both input paths run detection at the same scale.
        frame = imutils.resize(frame, width=300)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = face_detection.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE,
    )
    if len(faces) == 0:
        return frame, "No face detected."

    # Rects are (x, y, w, h); classify the largest detected face rather
    # than an arbitrary first detection.
    (fX, fY, fW, fH) = max(faces, key=lambda r: r[2] * r[3])

    # Extract the face ROI, resize to the CNN's 48x48 input, scale to
    # [0, 1], and add a batch dimension.
    roi = gray[fY:fY + fH, fX:fX + fW]
    roi = cv2.resize(roi, (48, 48))
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)

    preds = emotion_classifier.predict(roi)[0]
    label = EMOTIONS[preds.argmax()]

    # Annotate the frame with the winning label and the bounding box.
    cv2.putText(frame, label, (fX, fY - 10),
                cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
    cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH),
                  (238, 164, 64), 2)
    return frame, {emotion: float(prob) for emotion, prob in zip(EMOTIONS, preds)}
 
 
 
50
 
 
 
51
 
 
52
 
53
  # Define Gradio input and output components
54
  image_input = gr.components.Image(type='numpy', label="Upload Image or Video")