import gradio as gr
import os
import cv2
import numpy as np
import imutils
from keras.preprocessing.image import img_to_array
from keras.models import load_model

# Load the pre-trained models and define parameters
detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = 'model4_0.83/model4_entire_model.h5'
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
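# Emotion labels in the order of the model's output indices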
EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']


def predict_emotion(frame):
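    """Detect faces in an RGB frame (as supplied by Gradio's numpy image input)
    and draw each face's predicted emotion label and bounding box onto it."""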
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
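    # Detect candidate faces with the Haar cascade on the grayscale frame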
    faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
                                            minNeighbors=5, minSize=(30, 30),
                                            flags=cv2.CASCADE_SCALE_IMAGE)

    for (fX, fY, fW, fH) in faces:
        # Extract the ROI of the face from the grayscale image, resize it to a
        # fixed 48x48 pixels, and prepare the ROI for classification via the CNN
        roi = gray[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)

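        # Classify the ROI and pick the highest-probability emotion label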
        preds = emotion_classifier.predict(roi)[0]
        label = EMOTIONS[preds.argmax()]

        # Draw the predicted label and a bounding box over the detected face
        cv2.putText(frame, label, (fX, fY - 10),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (238, 164, 64), 1, cv2.LINE_AA)
        cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH),
                      (238, 164, 64), 2)

    return frame


# Gradio UI: a single image input, the annotated image as output, with
# example stills from the repo's images/ folder
demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(label="Predicted Emotion"),
    examples=[
        os.path.join(os.path.dirname(__file__), "images/chandler.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/janice.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/joey.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/phoebe.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/rachel_monica.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/ross.jpeg"),
        os.path.join(os.path.dirname(__file__), "images/gunther.jpeg"),
    ],
    title="How are you feeling?",
    theme="shivi/calm_seafoam",
)


if __name__ == "__main__":
    demo.launch()