from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from scipy import misc
import sys
import os
import argparse
# import tensorflow as tf
import numpy as np
import mxnet as mx
import random
import cv2
import sklearn
from sklearn.decomposition import PCA
from time import sleep
from easydict import EasyDict as edict
from mtcnn_detector import MtcnnDetector

sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src', 'common'))
import face_image
from lib.face_utils import judge_side_face
import face_preprocess
from RetinaFace.retinaface import RetinaFace
import time


def do_flip(data):
    """Mirror every channel plane of *data* left-to-right, in place.

    data: numpy array of shape (channels, height, width); each
    (height, width) plane is flipped along its width axis.
    """
    # Reversed-axis slice assignment flips all channels in one shot.
    data[:, :, :] = data[:, :, ::-1]


def get_model(ctx, image_size, model_str, layer):
    """Load a pretrained MXNet checkpoint and return a bound inference module.

    Args:
        ctx: MXNet device context (e.g. mx.gpu(0)).
        image_size: (height, width) of the network input.
        model_str: checkpoint specifier of the form "<prefix>,<epoch>".
        layer: name of the internal layer whose output is extracted.

    Returns:
        An mx.mod.Module bound for batch size 1, producing the output of
        the requested internal layer.
    """
    parts = model_str.split(',')
    assert len(parts) == 2
    prefix = parts[0]
    epoch = int(parts[1])
    print('loading', prefix, epoch)
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    # Truncate the computation graph at the requested internal layer.
    sym = sym.get_internals()[layer + '_output']
    model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
    # Inference only: a single aligned face crop per forward pass.
    model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
    model.set_params(arg_params, aux_params)
    return model


class FaceModel:
    """Bundles a face detector (MTCNN or RetinaFace) with an InsightFace
    embedding model and an optional gender/age model, all running on MXNet.

    Typical use: detect and align faces with get_input() / kalman_track(),
    embed them with get_feature(), optionally classify with get_ga().
    """

    def __init__(self, args):
        """Build the models and detector from command-line style options.

        args is expected to provide: gpu (int), image_size ("h,w" string),
        model / ga_model ("prefix,epoch" strings, may be empty), threshold,
        and det (0 = standard MTCNN, 2 = RetinaFace, otherwise permissive
        MTCNN) -- presumably an argparse Namespace or EasyDict; TODO confirm
        against the caller.
        """
        self.args = args
        ctx = mx.gpu(args.gpu)
        # ctx=mx.cpu()
        _vec = args.image_size.split(',')
        assert len(_vec) == 2
        image_size = (int(_vec[0]), int(_vec[1]))
        self.model = None
        self.ga_model = None
        if len(args.model) > 0:
            # 'fc1' is the embedding output layer of the recognition network.
            self.model = get_model(ctx, image_size, args.model, 'fc1')
        if len(args.ga_model) > 0:
            self.ga_model = get_model(ctx, image_size, args.ga_model, 'fc1')

        self.threshold = args.threshold
        self.det_minsize = 50
        self.det_threshold = [0.6, 0.7, 0.8]
        # self.det_factor = 0.9
        self.image_size = image_size
        # Pixels of padding added around each detected box before cropping.
        self.margin = 3
        mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')
        if args.det == 0:
            # Three-stage MTCNN cascade with the standard thresholds.
            detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark=True,
                                     threshold=self.det_threshold)
        elif args.det==2:
            detector=RetinaFace('./model/Retinaface/R50', 0, 0, 'net3')
        else:
            # Very permissive cascade thresholds: keep almost every candidate.
            detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark=True,
                                     threshold=[0.0, 0.0, 0.2])
        self.detector = detector

    # Input: an image containing a single person. Returns the person's
    # bounding box and the aligned crop ready for feature extraction.
    def get_input(self, face_img):
        """Detect the first face in face_img and return (bbox, aligned).

        Returns None when no face is found. aligned is a 112x112 RGB crop
        transposed to channels-first (3, H, W) for the embedding network.
        NOTE(review): the positional detect(img, 0.8, do_flip=False) call
        matches RetinaFace.detect; confirm it also works when args.det
        selects an MtcnnDetector.
        """
        ret = self.detector.detect(face_img,0.8,do_flip=False)
        if ret is None:
            return None
        bbox, points = ret
        if bbox.shape[0] == 0:
            return None
        # Keep only the first detection: its box and its landmark set.
        bbox = bbox[0, 0:4]
        point = points[0, :]
        nimg = face_preprocess.preprocess(face_img, bbox, point, image_size='112,112')
        # OpenCV delivers BGR; the network expects RGB, channels-first.
        nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
        aligned = np.transpose(nimg, (2, 0, 1))
        return bbox, aligned

    def get_feature(self, aligned):
        """Run the embedding model on one aligned face crop and return the
        L2-normalized embedding as a flat numpy vector."""
        input_blob = np.expand_dims(aligned, axis=0)
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data,))
        self.model.forward(db, is_train=False)
        embedding = self.model.get_outputs()[0].asnumpy()
        embedding = sklearn.preprocessing.normalize(embedding).flatten()
        return embedding

    def get_ga(self, aligned):
        """Run the gender/age model on one aligned face crop.

        Returns (gender, age): gender is the argmax over the first two
        outputs (presumably 0 = female, 1 = male -- TODO confirm); age is
        decoded from outputs 2..201 viewed as 100 binary pairs, summing the
        per-pair argmax.
        """
        input_blob = np.expand_dims(aligned, axis=0)
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data,))
        self.ga_model.forward(db, is_train=False)
        ret = self.ga_model.get_outputs()[0].asnumpy()
        g = ret[:, 0:2].flatten()
        gender = np.argmax(g)
        a = ret[:, 2:202].reshape((100, 2))
        a = np.argmax(a, axis=1)
        age = int(sum(a))
        return gender, age

    def extract_feature_and_compare(self,data,peoples,aligned):
        ''' Face recognition logic.
        Maps a feature vector to its corresponding identity; the intended design has
        three steps:
            1. Compare against every feature vector in the database and pick the identity
            with the highest similarity; if that similarity exceeds 0.6 the face is
            recognized as that identity, otherwise go to the next step;
            2. If step 1 found no match, compare against the recent-feature store
            (recentFeatures) in the same way; if the similarity exceeds 0.8 the face is
            taken to be that identity, otherwise go to the next step;

            3. Compare against the stranger store; if some stranger has similarity above
            0.6, label the face as that stranger, otherwise add it to the stranger store.
        Open issue: when a high-similarity stranger is later identified successfully, it
        should be removed from the stranger list and moved into the recent-feature store.
        NOTE(review): only step 1 is implemented in this body, and the threshold actually
        used below is 0.5, not the 0.6 described above -- confirm which is intended.
        :param arrayobj:
        :param arraysets:
        :return:
        '''
        arrayobj = self.get_feature(aligned)
        arrayobj = np.array(arrayobj)
        # Cosine similarity between the query embedding and every row of peoples.
        dot = np.dot(arrayobj, peoples.T)
        norm_arrayobj = np.linalg.norm(arrayobj)
        norm_arraysets = np.array(np.linalg.norm(peoples, axis=1))
        cos = dot / (norm_arrayobj * norm_arraysets)
        index = np.argmax(cos)
        # Best match exceeded the threshold: treat as a database hit.
        if cos[index] > 0.5:
            return True, data[index][0], cos[index]
        else:
            return False, data[index][0], cos[index]

    # Input: an image that may contain one or more people. Returns the person
    # boxes and the aligned crops ready for feature extraction.
    def kalman_track(self,face_img):
        """Detect all faces in one frame for tracking.

        Returns (re_bboxes, aligneds, img_size, addtional_attribute_list):
        detection boxes at the downscaled resolution, aligned 112x112 crops
        taken from the full-resolution frame, the downscaled frame size, and
        per-face attribute lists consumed by the tracker.
        NOTE(review): the frame is resized to 480x270 and landmarks/boxes are
        scaled back by a fixed factor of 4, which assumes 1920x1080 input
        frames -- confirm.
        """
        aligneds = []
        re_bboxes = []
        resize_face_img=cv2.resize(face_img,(480,270))
        img_size = np.asarray(resize_face_img.shape)[0:2]
        #ret = self.detector.detect_face(face_img, det_type=self.args.det)
        ret = self.detector.detect(resize_face_img,0.8,do_flip=False)
        addtional_attribute_list = []
        if ret is None:
            return re_bboxes, aligneds,img_size,addtional_attribute_list
        bboxs, points = ret
        if bboxs.shape[0] == 0:
            return re_bboxes, aligneds,img_size,addtional_attribute_list

        for i, bbox in enumerate(bboxs):
            score = round(bbox[4], 6)
            # Skip low-confidence detections.
            if score < 0.9:
                continue
            det = bbox[0:4]
            # Here, point has 5 rows and 2 columns; each row is one landmark.
            point = points[i, :]

            # face rectangle: pad by margin, clipped to the (resized) frame.
            det[0] = np.maximum(det[0] - self.margin, 0)
            det[1] = np.maximum(det[1] - self.margin, 0)
            det[2] = np.minimum(det[2] + self.margin, img_size[1])
            det[3] = np.minimum(det[3] + self.margin, img_size[0])

            bb = np.array(det, dtype=np.int32)
            cropped = resize_face_img[bb[1]:bb[3], bb[0]:bb[2], :].copy()
            # Pose/quality heuristics used downstream to pick the best crop.
            dist_rate, high_ratio_variance, width_rate = judge_side_face(
                np.array(point))
            item_list = [cropped, score, dist_rate, high_ratio_variance, width_rate]
            addtional_attribute_list.append(item_list)

            # Scale landmarks and box back to the original resolution (x4),
            # then align from the full-resolution frame for better quality.
            rec_point=np.array(point*4,dtype=np.float32)
            rec_bbox=np.array(bbox[0:4],dtype=np.float32)*4
            nimg = face_preprocess.preprocess(face_img, rec_bbox, rec_point, image_size='112,112')
            nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
            aligned = np.transpose(nimg, (2, 0, 1))
            re_bboxes.append(bbox)
            aligneds.append(aligned)
        return re_bboxes,aligneds,img_size,addtional_attribute_list
