import os

import cv2
import face_recognition
import numpy as np
from keras.models import load_model
# Mapping from emotion label to the model's output class index.
# Inverted at prediction time (index -> label) to decode the argmax.
# NOTE: 'neutral' fixes the original misspelling 'midium' (the inline
# comment said "neutral"); the label only affects printed/filename output.
emotion_dict = {'angry': 0,      # angry
                'sad': 5,        # sad
                'neutral': 4,    # neutral
                'disgust': 1,    # disgust
                'surprised': 6,  # surprised
                'scared': 2,     # fear
                'happy': 3       # happy
                }

#打开摄像头的方法，window_name为显示窗口名，video_id为你设备摄像头的id，默认为0或-1，如果引用usb可能会改变为1，等
def open_video(window_name, video_id):
    """Stream frames from a camera, running emotion recognition on each one.

    Opens device ``video_id`` (usually 0 or -1 for the built-in camera, 1 for
    a USB camera), shows each frame in a window named ``window_name``, saves
    the current frame to ``capture.jpg``, and passes it to ``face_rec``.
    Press ``q`` to quit; the loop also ends if the camera stops delivering
    frames.

    :param window_name: title of the OpenCV display window
    :param video_id: device index passed to ``cv2.VideoCapture``
    """
    cv2.namedWindow(window_name)          # create the display window
    cap = cv2.VideoCapture(video_id)      # acquire the camera
    num = 1                               # frame counter, used in output filenames
    try:
        while cap.isOpened():
            ok, frame = cap.read()        # ok = read succeeded, frame = BGR image
            if not ok:
                break
            cv2.imshow(window_name, frame)
            cv2.imwrite("capture.jpg", frame)
            try:
                face_rec(frame, num)
            except IndexError:
                # No face detected in this frame — skip it instead of
                # letting the exception kill the whole capture loop.
                pass
            # Wait 10 ms for a key press (also services the GUI event loop);
            # mask to 8 bits because waitKey can return platform-dependent
            # high bits.
            c = cv2.waitKey(10)
            if c & 0xFF == ord('q'):      # quit on 'q'
                break
            num += 1
    finally:
        cap.release()
        cv2.destroyWindow(window_name)
        print("cam closed")

def face_rec(frame, num):
    """Detect the first face in ``frame``, classify its emotion, save the frame.

    The predicted label is printed and the full frame is written to
    ``out/<num><label>.jpg``. Frames with no detectable face are skipped.

    :param frame: BGR image as produced by ``cv2.VideoCapture.read``
    :param num: frame counter used to build the output filename
    """
    # face_recognition works on RGB arrays; OpenCV frames are BGR.
    # (The original round-tripped through capture.jpg on disk and then
    # mislabeled the RGB data as BGR when converting to grayscale.)
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Find faces; each location is (top, right, bottom, left).
    face_locations = face_recognition.face_locations(image)
    if not face_locations:
        return  # no face this frame — nothing to classify
    top, right, bottom, left = face_locations[0]

    # Crop the first face and shape it for the model: 48x48 grayscale,
    # batch dimension and channel dimension of 1.
    face_image = image[top:bottom, left:right]
    face_image = cv2.resize(face_image, (48, 48))
    face_image = cv2.cvtColor(face_image, cv2.COLOR_RGB2GRAY)
    face_image = np.reshape(face_image, [1, 48, 48, 1])

    # Load the Keras model once and cache it on the function — the
    # original reloaded the .hdf5 from disk on every single frame.
    model = getattr(face_rec, "_model", None)
    if model is None:
        model = load_model("./model_v6_23.hdf5")
        face_rec._model = model

    # Classify and decode the argmax index back to an emotion label.
    predicted_class = np.argmax(model.predict(face_image))
    label_map = {v: k for k, v in emotion_dict.items()}
    predicted_label = label_map[predicted_class]
    print(predicted_label)

    # Persist the annotated-by-name frame; make sure out/ exists first.
    os.makedirs("out", exist_ok=True)
    cv2.imwrite("out/" + str(num) + predicted_label + ".jpg", frame)

if __name__ == '__main__':
    # Entry point: start streaming from the default camera (device 0).
    open_video(window_name='mycam', video_id=0)