import cv2 as cv
import numpy as np
import dlib
import time


# dlib's HOG-based frontal face detector and the pretrained 68-point
# facial-landmark model. Paths are relative to the working directory —
# NOTE(review): assumes the script is launched from its own folder; confirm.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('../dlib/shape_predictor_68_face_landmarks.dat')


def open_camera(camera_id):
    """Stream frames from a camera or video file and visualize head pose.

    Each frame is scanned with a Haar cascade; the first detected face is
    handed to dlib for landmark extraction, the pose is solved via
    :func:`get_vector`, and the projected orientation triangles are drawn
    onto the frame.

    Args:
        camera_id: int camera index (0/1 inputs are mirrored) or a video
            file path accepted by ``cv.VideoCapture``.

    Press 'q' to quit. The capture is released and all windows are
    destroyed on exit.
    """
    cap = cv.VideoCapture(camera_id)
    # Haar cascade only localizes the face region; dlib refines landmarks.
    path_face_classifier = '../haarcascade/haarcascades/haarcascade_frontalface_alt.xml'
    face_classifier = cv.CascadeClassifier(path_face_classifier)
    try:
        while cap.isOpened():
            ok, frame = cap.read()
            if not ok:
                break
            if camera_id in (0, 1):
                # Mirror live webcam feeds so on-screen motion matches the user.
                frame = cv.flip(frame, 1, dst=None)
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            face_rects = face_classifier.detectMultiScale(
                gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
            if len(face_rects) > 0:
                # Only the first detected face is processed (the original loop
                # broke after one iteration anyway).
                x, y, w, h = face_rects[0]
                # Fix: crop into a separate variable instead of clobbering
                # `gray`, so the full-frame image stays intact.
                face_roi = gray[y: y + h, x: x + w]
                size = frame.shape
                try:
                    t1 = time.time()
                    # Pass the crop origin so landmarks can be mapped back
                    # into full-frame coordinates.
                    image_points = get_2d_points(x, y, face_roi)
                    _put_timing(frame, 'get_2d_points', t1, 20)

                    t1 = time.time()
                    camera_matrix, dist_coeffs, rotation_vec, translation_vec = \
                        get_vector(size, image_points)
                    _put_timing(frame, 'get_vector', t1, 40)

                    t1 = time.time()
                    _draw_pose(frame, image_points, rotation_vec, translation_vec,
                               camera_matrix, dist_coeffs)
                    _put_timing(frame, 'projectPoints', t1, 60)
                except Exception as e:
                    # dlib may find no landmarks inside the Haar crop; keep
                    # streaming instead of crashing.
                    print('not detect face points', e)
            # Fix: show the frame and poll the keyboard on EVERY iteration —
            # the original skipped both when no face was found, freezing the
            # display and making 'q' unresponsive.
            cv.imshow('frame', frame)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Fix: the capture handle was never released in the original.
        cap.release()
        cv.destroyAllWindows()


def _put_timing(frame, label, t_start, y):
    # Overlay "<label>:<ms>ms" at (20, y), measuring from t_start to now.
    ms = int(round((time.time() - t_start) * 1000))
    cv.putText(frame, label + ':' + str(ms) + 'ms', (20, y), 1, 1, (255, 255, 255), 1)


def _draw_pose(frame, image_points, rotation_vec, translation_vec,
               camera_matrix, dist_coeffs):
    # Project three face-model points pushed 800 units along +Z into the
    # image, draw a red ray from each landmark to its projection, then
    # outline both triangles to visualize head orientation.
    targets = [
        (1, (0.0, -330.0, 800.0)),    # chin
        (2, (-225.0, 170.0, 800.0)),  # left eye, left corner
        (3, (225.0, 170.0, 800.0)),   # right eye, right corner
    ]
    face_pts = []
    target_pts = []
    for idx, model_pt in targets:
        projected, _ = cv.projectPoints(np.array([model_pt]),
                                        rotation_vec, translation_vec,
                                        camera_matrix, dist_coeffs)
        src = (int(image_points[idx][0]), int(image_points[idx][1]))
        dst = (int(projected[0][0][0]), int(projected[0][0][1]))
        cv.line(frame, src, dst, (0, 0, 255), 2)
        face_pts.append(src)
        target_pts.append(dst)
    cv.polylines(img=frame, pts=[np.array(target_pts)], isClosed=True,
                 color=(255, 255, 255), thickness=1)
    cv.polylines(img=frame, pts=[np.array(face_pts)], isClosed=True,
                 color=(0, 255, 255), thickness=1)


def get_2d_points(x, y, gray):
    """Extract the six canonical pose landmarks from a cropped face image.

    Args:
        x, y: top-left corner of the face crop within the original frame,
            used to shift landmark coordinates back into full-frame space.
        gray: grayscale face region passed to the dlib detector/predictor.

    Returns:
        A (6, 2) ``double`` array — nose tip, chin, left-eye left corner,
        right-eye right corner, left mouth corner, right mouth corner —
        or ``None`` when dlib detects no face in the crop.
    """
    # Landmark indices in the 68-point model for the six pose points.
    pose_indices = (30, 8, 36, 45, 48, 54)
    for rect in detector(gray, 1):
        parts = predictor(gray, rect).parts()
        # Offset each landmark by the crop origin so callers can draw on
        # the full frame. Only the first detected face is used.
        return np.array(
            [(x + parts[i].x, y + parts[i].y) for i in pose_indices],
            dtype='double')


# 根据2d点，3d点，相机内参，畸变参数 获取旋转矩阵和平移矩阵
def get_vector(img_size, image_points):
    # 3d模型坐标
    object_model = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (0.0, -330.0, -65.0),  # Chin
        (-225.0, 170.0, -135.0),  # Left eye left corner
        (225.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])
    # 焦距focal_length(相机坐标系与图像坐标系之间的距离为焦距f，也即图像坐标系原点与焦点重合)
    focal_length = img_size[1]
    center = (img_size[1] / 2, img_size[0] / 2)
    camera_matrix = np.array(
        [[focal_length, 0, center[0]],
         [0, focal_length, center[1]],
         [0, 0, 1]], dtype="double"
    )
    # camera_matrix = np.load('../camera_parameter/mtx.npy')

    # 相机外参假设为0
    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    # dist_coeffs = np.load('../camera_parameter/dist.npy')
    _, rotation_vector, translation_vector = cv.solvePnP(objectPoints=object_model, imagePoints=image_points,
                                                         cameraMatrix=camera_matrix, distCoeffs=dist_coeffs,
                                                         flags=cv.SOLVEPNP_ITERATIVE)
    return camera_matrix, dist_coeffs, rotation_vector, translation_vector


# 将旋转矩阵转化为欧拉角
def euler(rotation_vec, translation_vec):
    """Convert a Rodrigues rotation vector (plus translation) to Euler angles.

    Args:
        rotation_vec: 3x1 rotation vector from ``cv.solvePnP``.
        translation_vec: 3x1 translation vector from ``cv.solvePnP``.

    Returns:
        The 3x1 Euler-angle array (degrees) produced by
        ``cv.decomposeProjectionMatrix``.
    """
    # Expand the 3x1 rotation vector into a 3x3 rotation matrix.
    rotation_mat, _ = cv.Rodrigues(rotation_vec)
    # Fix: the projection matrix must be the 3x4 [R | t]. The original
    # concatenated the rotation *vector* (giving a 3x2 matrix that
    # decomposeProjectionMatrix rejects) and left the Rodrigues result
    # (misspelled `retation_mat`) unused.
    pose_mat = cv.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv.decomposeProjectionMatrix(pose_mat)
    return euler_angle


if __name__ == '__main__':
    # Demo entry point: run head-pose estimation on a sample video file
    # instead of a live webcam.
    video_path = '../video/many_people.mp4'
    open_camera(video_path)
