"""
@Project    : cosmo-face
@Module     : face_verify.py
@Author     : HuangJiWen[huangjiwen@haier.com]
@Created    : 2020/9/14 16:52
@Desc       : 
"""

import argparse

import cv2
import numpy as np
import torch
from PIL import Image
from skimage import transform as trans
from torchvision import transforms

from config import get_config
from detection.data import cfg_center_rfb, cfg_center_ghost, cfg_center_resnet18, cfg_center_resnet50
from detection.widerface_test_centerface import FaceDetectionLandmark
from model import Backbone, l2_norm


def align(img, landmark, **kwargs):
    """Warp a detected face onto the canonical ArcFace five-point template.

    A similarity transform is estimated from the detected landmarks to the
    reference template and applied with an affine warp.

    :param img: source image (numpy array, BGR as read by cv2)
    :param landmark: (5, 2) array of facial landmark coordinates
    :param kwargs: optional ``image_size`` as (height, width); default (112, 112)
    :return: aligned face crop of the requested size
    """
    out_h, out_w = kwargs.get('image_size', (112, 112))

    # Reference five-point template defined for a 96x112 crop (ArcFace convention).
    reference = np.array(
        [[30.2946, 51.6963],
         [65.5318, 51.5014],
         [48.0252, 71.7366],
         [33.5493, 92.3655],
         [62.7299, 92.2041]],
        dtype=np.float32,
    )
    # Shift x-coordinates by 8px when targeting a 112-wide crop instead of 96.
    if out_w == 112:
        reference[:, 0] += 8.0

    similarity = trans.SimilarityTransform()
    similarity.estimate(landmark.astype(np.float32), reference)
    affine_matrix = similarity.params[0:2, :]

    return cv2.warpAffine(src=img, M=affine_matrix, dsize=(out_w, out_h), borderValue=0.0)


def load_face_database(conf):
    """Load the cached face database produced by ``prepare_face_database``.

    :param conf: config whose ``face_database_path`` is the database directory
    :return: tuple of (embeddings tensor, numpy array of person names)
    """
    database_dir = conf.face_database_path
    stored_embeddings = torch.load(database_dir / 'face_database.pth')
    stored_names = np.load(database_dir / 'names.npy')
    return stored_embeddings, stored_names


def prepare_face_database(conf, recognition_model, tta=True):
    """Build and persist the face database from per-person image folders.

    Each sub-directory of ``conf.face_database_path`` represents one identity;
    every image file inside is embedded by ``recognition_model`` and the
    embeddings are averaged into a single vector per person. The resulting
    tensor and name array are saved back into the database directory.

    :param conf: config providing ``face_database_path``, ``test_transform`` and ``device``
    :param recognition_model: embedding network in eval mode
    :param tta: when True, average each embedding with its horizontal flip
    :return: tuple of (embeddings tensor, numpy array of person names)
    """
    all_embeddings = []
    all_names = []

    def _embed(image):
        # One forward pass: transform, move to device, add batch dimension.
        return recognition_model(conf.test_transform(image).to(conf.device).unsqueeze(0))

    for person_dir in conf.face_database_path.iterdir():
        if person_dir.is_file():
            continue  # only sub-directories represent identities
        person_embeddings = []
        for image_file in person_dir.iterdir():
            if not image_file.is_file():
                continue
            img = Image.open(image_file)
            with torch.no_grad():
                if tta:
                    # Test-time augmentation: fuse original and mirrored embeddings.
                    flipped = transforms.functional.hflip(img)
                    person_embeddings.append(l2_norm(_embed(img) + _embed(flipped)))
                else:
                    person_embeddings.append(_embed(img))
        if not person_embeddings:
            continue  # skip folders that contained no usable image
        all_embeddings.append(torch.cat(person_embeddings).mean(0, keepdim=True))
        all_names.append(person_dir.name)

    face_embeddings = torch.cat(all_embeddings)
    face_names = np.array(all_names)
    torch.save(face_embeddings, conf.face_database_path / 'face_database.pth')
    np.save(conf.face_database_path / 'names', face_names)

    return face_embeddings, face_names


def recognition_inference(conf, recognition_model, faces, target_embeddings, threshold, tta=False):
    """Match detected faces against the face database by embedding distance.

    :param conf: config providing ``test_transform`` and ``device``
    :param recognition_model: embedding network in eval mode
    :param faces: iterable of aligned face images accepted by ``conf.test_transform``
    :param target_embeddings: [n, 512] computed embeddings of faces in face database
    :param threshold: maximum squared L2 distance to accept a match
    :param tta: test time augmentation (hflip, that's all)
    :return: (min_idx, minimum) — per-face index of the closest database entry
        (-1 when the distance exceeds ``threshold``) and that distance
    """

    embedding_lst = []
    for face_img in faces:
        if tta:
            # BUGFIX: was `trans.functional.hflip` — `trans` is skimage.transform,
            # which has no `functional` attribute; torchvision's transforms module
            # is intended (matches prepare_face_database).
            mirror = transforms.functional.hflip(face_img)
            emb = recognition_model(conf.test_transform(face_img).to(conf.device).unsqueeze(0))
            emb_mirror = recognition_model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
            embedding_lst.append(l2_norm(emb + emb_mirror))
        else:
            embedding_lst.append(recognition_model(conf.test_transform(face_img).to(conf.device).unsqueeze(0)))

    if len(embedding_lst) == 0:
        # No faces: return empty results instead of crashing on torch.cat([]).
        return torch.empty(0, dtype=torch.long), torch.empty(0)

    # Pairwise squared L2 distances: [m, 512, 1] - [1, 512, n] -> sum over dim 1 -> [m, n]
    source_embeddings = torch.cat(embedding_lst)
    diff = source_embeddings.unsqueeze(-1) - target_embeddings.transpose(1, 0).unsqueeze(0)
    dist = torch.sum(torch.pow(diff, 2), dim=1)
    minimum, min_idx = torch.min(dist, dim=1)
    min_idx[minimum > threshold] = -1  # if no match, set idx to -1

    return min_idx, minimum


if __name__ == "__main__":

    torch.set_grad_enabled(False)

    parser = argparse.ArgumentParser(description='for face verification')
    # face detection params
    parser.add_argument('-m', '--trained_model',
                        default='F:/gitee_project/cosmo-face/detection/weights/resnet50_center_Final.pth',
                        type=str, help='Trained state_dict file path to open')
    parser.add_argument('--network', default='resnet50', help='Backbone network resnet18 or ghost or RFB or resnet50')
    parser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')
    parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
    parser.add_argument('--confidence_threshold', default=0.1, type=float, help='confidence_threshold')
    parser.add_argument('--top_k', default=5000, type=int, help='top_k')
    parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
    parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
    parser.add_argument('-s', '--save_image', default=True, help='show detection results')
    # parser.add_argument('--vis_thres', default=0.01, type=float, help='visualization_threshold')

    # face recognition params
    parser.add_argument('-th', '--threshold', help='threshold to decide identical faces', default=1.3, type=float)
    parser.add_argument("-u", "--update", help="whether perform update the face database", default=True, type=bool)
    parser.add_argument("-tta", "--tta", help="whether test time augmentation", default=False, type=bool)

    # 测试数据集路径
    parser.add_argument('--test_image_path', default="./data/image_test/bao_chen.jpg", type=str, help='test image path')
    parser.add_argument('--recognition_model_path', default="./weights/model_irse50_final.pth",
                        type=str, help='recognition model path')

    args = parser.parse_args()

    # #############   人脸检测与关键点检测阶段   ##############
    # 设置人脸检测和关键点检测参数
    cfg = None
    if args.network == "resnet18":
        cfg = cfg_center_resnet18
    elif args.network == "resnet50":
        cfg = cfg_center_resnet50
    elif args.network == "RFB":
        cfg = cfg_center_rfb
    elif args.network == "ghost":
        cfg = cfg_center_ghost
    else:
        print("Don't support network!")
        exit(0)
    # 导入检测模型
    face_detection_landmark = FaceDetectionLandmark(cfg=cfg, args=args)

    # #############   人脸识别与比对阶段   ##############
    # 设置识别模型参数
    conf1 = get_config(training=False)

    # 导入识别模型
    model1 = Backbone(conf1.net_depth, conf1.drop_ratio, conf1.net_mode).to(conf1.device)
    model1.load_state_dict(torch.load(args.recognition_model_path))
    model1.eval()
    print('model loaded success! ')

    # 是否需要更新人脸库
    if args.update:
        targets, names = prepare_face_database(conf=conf1, recognition_model=model1, tta=args.tta)
    else:
        targets, names = load_face_database(conf=conf1)

    # single image predict
    # image_path = "../detection/test/temp.jpg"
    img_raw = cv2.imread(filename=args.test_image_path, flags=cv2.IMREAD_COLOR)
    img = np.float32(img_raw)
    # 获取关键点
    bounding_boxes, key_points = face_detection_landmark.predict(img)

    face_lst = []
    for i in range(bounding_boxes.shape[0]):
        bounding_box = list(map(int, bounding_boxes[i, :-1]))
        five_point = key_points[i, :10].reshape(-1, 2)
        face = img_raw[bounding_box[1]:bounding_box[3], bounding_box[0]:bounding_box[2]]
        align_img = align(img=img_raw, landmark=five_point, image_size=(112, 112))
        # cv2.imwrite("./data/image_test/che_wang_{}.jpg".format(i), align_img)
        face_lst.append(align_img)

    results, score = recognition_inference(conf=conf1, recognition_model=model1, faces=face_lst,
                                           target_embeddings=targets, threshold=args.threshold)
    print(score)
    for idx, bbox in enumerate(bounding_boxes):
        if results[idx] == -1:
            continue
        name = str(names[results[idx]]) + '_{}'.format(str(round(float(score[idx]), 2)))
        bounding_box = list(map(int, bounding_boxes[idx, :-1]))
        cv2.rectangle(img_raw, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), (0, 0, 255), 1)
        cv2.putText(img_raw, name, (bounding_box[0], bounding_box[1]), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 3, cv2.LINE_AA)

    cv2.imshow("img", img_raw)
    cv2.waitKey(0)
