from datetime import datetime
import base64
from PIL import Image
from io import BytesIO
import numpy as np
import threading
import queue
import random
import cv2
import math
import time
import sys
import os

# Make the bundled RetinaFace package importable as a top-level module.
sys.path.append(os.path.join(os.path.dirname(__file__), 'RetinaFace'))

# from model.RetinaFace.retinaface import RetinaFace
from model.RetinaFace.retinaface_cov import RetinaFaceCoV

# NOTE(review): the import below resolves via the package path, not this
# appended directory, so this path hack may be redundant -- TODO confirm.
sys.path.append('../face_recog/insightface/src/common')
from face_recog.insightface.src.common import face_preprocess

class RetinaDetectFaceModel():
    """Face detector built on RetinaFaceCoV.

    Detects faces in a BGR image, estimates head pose via ``cv2.solvePnP``,
    flags mask wearing from the detector's mask score, and returns aligned
    112x112 face crops produced by ``face_preprocess.preprocess``.
    """

    def __init__(self, dictProperties):
        """Build detection args from the config mapping and load the model.

        Args:
            dictProperties: config mapping; must contain 'retina_detect_gpu'
                (GPU id as an int-parsable value; < 0 skips GPU warm-up).

        Raises:
            TypeError: if 'retina_detect_gpu' is missing (int(None)) --
                fail fast on a broken config.
        """
        args = {}
        args['image_size'] = '112,112'
        args['gpu'] = int(dictProperties.get('retina_detect_gpu'))
        args['det'] = 2
        args['flip'] = 0
        args['threshold'] = 1.24

        args['model_det'] = 'model/retina/mnet_cov2'
        self.init_model(args)

    def init_model(self, args):
        """Load the RetinaFaceCoV detector and warm it up when on GPU.

        Args:
            args: dict with keys 'image_size' ("W,H" string), 'gpu',
                'threshold', 'model_det' (model prefix path).
        """
        self.args = args
        _vec = args['image_size'].split(',')
        assert len(_vec) == 2
        image_size = (int(_vec[0]), int(_vec[1]))
        # Recognition / gender-age models are not used by this wrapper.
        self.model = None
        self.ga_model = None

        self.threshold = args['threshold']
        self.det_minsize = 50
        self.det_threshold = [0.6, 0.7, 0.8]
        self.image_size = image_size

        # Mask score at or above this value marks the face as masked.
        self.mask_thresh = 0.2

        # Square dummy inputs from 100x100 up to 800x800 for warm-up.
        image_presize = [(s, s, 3) for s in range(100, 900, 100)]
        self.detector = RetinaFaceCoV(args['model_det'], 0, args['gpu'], 'net3l')

        if args['gpu'] >= 0:
            # Warm-up: MXNet/CUDA autotunes convolution kernels on first use,
            # which is slow, so run dummy inputs of each expected size now.
            for test_shape in image_presize:
                self.detector.detect(np.zeros(test_shape))

    def get_detector(self):
        """Return the underlying RetinaFaceCoV detector instance."""
        return self.detector

    def detect(self, img):
        """Run raw face detection with a best-effort error boundary.

        Args:
            img: BGR image ndarray.

        Returns:
            (None, (faces, landmarks)) on success, or
            ((90099, '服务超时'), None) on any detector failure.
        """
        try:
            feature = self.detector.detect(img)
            print('---success')
            return None, feature
        except Exception as e1:
            # Log the actual failure instead of swallowing it silently.
            print('RetinaDetectFaceModel???????????????????????error', e1)
            return (90099, '服务超时'), None

    def detect_faces(self, img):
        """Detect all faces in *img* and return per-face metadata.

        Args:
            img: BGR image (H, W, 3) ndarray.

        Returns:
            (error, None) on detector failure, else (None, results) where
            each entry of *results* is a dict with:
                'box':        [x, y, w, h] ints,
                'is_mask':    True if mask score >= self.mask_thresh,
                'face_angle': yaw angle in degrees (int),
                'nimg':       aligned 112x112 face crop.
        """
        t1 = time.time()
        im_shape = img.shape
        error, result = self.detect(img)
        if error:
            return error, None

        faces, points = result
        print('----retina time:', time.time() - t1)
        res = []
        for i in range(faces.shape[0]):
            face = faces[i]
            # np.int was removed in NumPy 1.24; plain int is the replacement.
            boxes = face[:4].astype(int)
            w, h = boxes[2] - boxes[0], boxes[3] - boxes[1]
            print('w=%s,h=%s' % (w, h))
            box_val = [int(boxes[0]), int(boxes[1]), int(w), int(h)]

            # face[5] is the mask-classification score from RetinaFaceCoV;
            # bool() keeps the contract a plain Python bool.
            is_mask = bool(face[5] >= self.mask_thresh)

            # Landmark order: 0 left eye, 1 right eye, 2 nose,
            # 3 left mouth corner, 4 right mouth corner.
            landmark_val = points[i]
            image_points = np.array([
                (landmark_val[2][0], landmark_val[2][1]),  # Nose tip
                # NOTE(review): the 5-point detector has no chin landmark, so
                # the nose tip is reused as the chin. This skews the pitch
                # estimate; confirm it is acceptable for yaw-only use.
                (landmark_val[2][0], landmark_val[2][1]),  # Chin
                (landmark_val[0][0], landmark_val[0][1]),  # Left eye left corner
                (landmark_val[1][0], landmark_val[1][1]),  # Right eye right corner
                (landmark_val[3][0], landmark_val[3][1]),  # Left mouth corner
                (landmark_val[4][0], landmark_val[4][1])   # Right mouth corner
            ], dtype="double")

            ret, rotation_vector, translation_vector, camera_matrix, dist_coeffs = self.get_pose_estimation(
                im_shape, image_points)
            ret, pitch, yaw, roll = self.get_euler_angle(rotation_vector)

            # Integer (5, 2) landmarks for alignment (equivalent to the old
            # flatten-then-reshape loop; assumes landmark_val is (5, 2)).
            point = landmark_val.astype(int).reshape((5, 2))
            nimg = face_preprocess.preprocess(img, boxes, point, image_size='112,112')

            res.append({
                'box': box_val,
                'is_mask': is_mask,
                'face_angle': yaw,
                'nimg': nimg
            })

        return None, res

    def get_pose_estimation(self, img_size, image_points):
        """Estimate the head's rotation and translation vectors via solvePnP.

        Args:
            img_size: image shape tuple (H, W, ...) -- used for the pinhole
                camera intrinsics (focal length approximated by image width).
            image_points: (6, 2) double array of 2D facial landmarks in the
                order nose, chin, left eye, right eye, left mouth, right mouth.

        Returns:
            (success, rotation_vector, translation_vector, camera_matrix,
            dist_coeffs) straight from ``cv2.solvePnP``.
        """
        # Generic 3D face model points matching image_points' order.
        model_points = np.array([
            (0.0, 0.0, 0.0),           # Nose tip
            (0.0, -330.0, -65.0),      # Chin
            (-225.0, 170.0, -135.0),   # Left eye left corner
            (225.0, 170.0, -135.0),    # Right eye right corner
            (-150.0, -150.0, -125.0),  # Left mouth corner
            (150.0, -150.0, -125.0)    # Right mouth corner
        ])

        # Camera intrinsics: pinhole model, principal point at image center.
        focal_length = img_size[1]
        center = (img_size[1] / 2, img_size[0] / 2)
        camera_matrix = np.array(
            [[focal_length, 0, center[0]],
             [0, focal_length, center[1]],
             [0, 0, 1]], dtype="double"
        )

        print("Camera Matrix :{}".format(camera_matrix))

        dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
                                                                      dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)

        print("Rotation Vector:\n {}".format(rotation_vector))
        print("Translation Vector:\n {}".format(translation_vector))
        return success, rotation_vector, translation_vector, camera_matrix, dist_coeffs

    def get_euler_angle(self, rotation_vector):
        """Convert a Rodrigues rotation vector to Euler angles in degrees.

        Args:
            rotation_vector: (3, 1) array as returned by ``cv2.solvePnP``.

        Returns:
            (0, pitch_deg, yaw_deg, roll_deg) -- degrees as ints, truncated
            toward zero (same ``int()`` truncation as before).
        """
        # Rotation angle is the L2 norm of the Rodrigues vector.
        theta = float(np.linalg.norm(rotation_vector))
        if theta == 0.0:
            # Zero rotation: the axis is undefined and the original code
            # divided by zero here; all Euler angles are zero by definition.
            return 0, 0, 0, 0

        # Axis-angle -> unit quaternion (w, x, y, z).
        half = theta / 2
        w = math.cos(half)
        scale = math.sin(half) / theta
        x = rotation_vector[0][0] * scale
        y = rotation_vector[1][0] * scale
        z = rotation_vector[2][0] * scale

        ysqr = y * y
        # pitch (x-axis rotation)
        t0 = 2.0 * (w * x + y * z)
        t1 = 1.0 - 2.0 * (x * x + ysqr)
        print('t0:{}, t1:{}'.format(t0, t1))
        pitch = math.atan2(t0, t1)

        # yaw (y-axis rotation); clamp the asin argument into [-1, 1]
        # against floating-point drift.
        t2 = 2.0 * (w * y - z * x)
        t2 = max(-1.0, min(1.0, t2))
        yaw = math.asin(t2)

        # roll (z-axis rotation)
        t3 = 2.0 * (w * z + x * y)
        t4 = 1.0 - 2.0 * (ysqr + z * z)
        roll = math.atan2(t3, t4)

        print('pitch:{}, yaw:{}, roll:{}'.format(pitch, yaw, roll))

        # Radians -> degrees.
        Y = int((pitch / math.pi) * 180)
        X = int((yaw / math.pi) * 180)
        Z = int((roll / math.pi) * 180)

        return 0, Y, X, Z