"""
@Project    : cosmo-face
@Module     : widerface_test_centerface.py
@Author     : HuangJiWen[huangjiwen@haier.com]
@Created    : 2020/7/27 14:49
@Desc       : 
"""

from __future__ import print_function

import argparse
import os

import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn

from detection.data import cfg_center_rfb, cfg_center_ghost, cfg_center_resnet18, cfg_center_resnet50
from models.center_face.ghostnet_face import CenterGhostNetFace
from models.center_face.resnet_face import CenterResNetFace
from models.center_face.rfbnet_face import CenterRFBNetFace
from utils.constant_func import multi_pose_decode
from utils.timer import Timer


def check_keys(model, pretrained_state_dict):
    """Verify that a checkpoint's keys overlap the model's state dict.

    Prints how many keys are missing from the checkpoint, present in the
    checkpoint but unused by the model, and shared by both; fails if the
    intersection is empty.
    """
    checkpoint_keys = set(pretrained_state_dict)
    network_keys = set(model.state_dict())
    shared_keys = network_keys & checkpoint_keys
    print('Missing keys:{}'.format(len(network_keys - checkpoint_keys)))
    print('Unused checkpoint keys:{}'.format(len(checkpoint_keys - network_keys)))
    print('Used keys:{}'.format(len(shared_keys)))
    assert len(shared_keys) > 0, 'load NONE from pretrained checkpoint'
    return True


def remove_prefix(state_dict, prefix):
    """Strip *prefix* (e.g. ``'module.'``) from every matching key.

    Old-style checkpoints saved from a DataParallel wrapper share a common
    key prefix; keys without the prefix pass through unchanged.
    """
    print('remove prefix \'{}\''.format(prefix))
    stripped = {}
    for key, value in state_dict.items():
        if key.startswith(prefix):
            key = key[len(prefix):]
        stripped[key] = value
    return stripped


def load_model(model, pretrained_path, load_to_cpu):
    """Load checkpoint weights from *pretrained_path* into *model*.

    Handles both raw state dicts and checkpoints wrapped in a
    ``'state_dict'`` entry, strips any DataParallel ``'module.'`` prefix,
    and loads non-strictly after sanity-checking key overlap.
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        gpu_id = torch.cuda.current_device()
        checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(gpu_id))

    # Some trainers wrap the weights in a 'state_dict' entry.
    weights = checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint
    weights = remove_prefix(weights, 'module.')

    check_keys(model, weights)
    model.load_state_dict(weights, strict=False)
    return model


def get_scale(img1_shape, img0_shape):
    """Return the (pad, gain) mapping coords from img1_shape back to img0_shape.

    Shapes are (h, w). ``gain`` is the old/new scale factor; ``pad`` is the
    (x, y) letterbox padding in img1 coordinates.
    """
    height_ratio = img1_shape[0] / img0_shape[0]
    width_ratio = img1_shape[1] / img0_shape[1]
    gain = height_ratio if height_ratio < width_ratio else width_ratio
    pad_x = (img1_shape[1] - img0_shape[1] * gain) / 2
    pad_y = (img1_shape[0] - img0_shape[0] * gain) / 2
    return (pad_x, pad_y), gain


class FaceDetectionLandmark:
    """CenterFace-style face detector with five-point landmark prediction.

    Builds the configured backbone, loads its checkpoint, and exposes
    single-image prediction (``predict``), WIDER FACE folder evaluation
    (``predict_folder``) and a drawing helper (``draw_results``).
    """

    def __init__(self, cfg, args):
        """
        Args:
            cfg: network config dict; reads keys "name", "cpu",
                "image_size", "down_ratio", "topk", "vis_threshold".
            args: parsed CLI namespace; reads trained_model,
                dataset_folder, save_folder, save_image, network.
        """
        self.cfg = cfg
        self.args = args
        self.image_size = None    # (h, w) of the resized network input, set by pre_process
        self.origin_shape = None  # (h, w) of the original image, set by pre_process
        self.device = torch.device("cpu" if self.cfg["cpu"] else "cuda")
        self._t = {'forward_pass': Timer(), 'misc': Timer()}

        # Instantiate the backbone requested by the config.
        if self.cfg["name"] == "resnet18" or self.cfg["name"] == "resnet50":
            self.model = CenterResNetFace(cfg=self.cfg)
        elif self.cfg["name"] == "ghost":
            self.model = CenterGhostNetFace(cfg=self.cfg)
        elif self.cfg["name"] == "RFB":
            self.model = CenterRFBNetFace(cfg=self.cfg)
        else:
            print("Don't support network!")
            exit(1)  # fixed: was exit(0), which reported success on this error path
        self.model = load_model(self.model, self.args.trained_model, self.cfg["cpu"])
        self.model.eval()
        if torch.cuda.is_available():
            self.model = self.model.to(self.device)
            cudnn.benchmark = True

    def pre_process(self, image):
        """Resize, mean-subtract and convert a float BGR image to a tensor.

        Scales so the long side equals cfg["image_size"] (aspect ratio
        preserved, no padding), subtracts the BGR channel means and returns
        a 1x3xHxW float tensor. Also records origin/resized shapes on self
        for post_process.

        NOTE: the mean subtraction is in place, so the caller's float
        array is modified.
        """
        # Align on the long side: resize so max(h, w) == cfg["image_size"].
        self.origin_shape = image.shape[:2]
        img_shape = image.shape
        im_size_max = np.max(img_shape[0:2])
        resize = float(self.cfg["image_size"]) / float(im_size_max)
        if resize != 1:
            image = cv2.resize(image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
        self.image_size = image.shape[:2]
        image -= (104, 117, 123)  # BGR channel means (in place)
        image = image.transpose(2, 0, 1)  # HWC -> CHW
        image = torch.from_numpy(image).unsqueeze(0)  # add batch dimension

        return image

    def post_process(self, outputs):
        """Decode network heads into boxes/landmarks in original-image coords.

        Args:
            outputs: (hm, wh, hps, reg, hm_hp, hp_offset) head tensors.

        Returns:
            bboxes: (K, 5) numpy array of x1, y1, x2, y2, score.
            pts: (K, 15) numpy array of five (x, y) landmarks followed by
                their five scores, with K = cfg["topk"].
        """
        hm, wh, hps, reg, hm_hp, hp_offset = outputs
        hm = hm.sigmoid_()
        hm_hp = hm_hp.sigmoid_()

        dets = multi_pose_decode(heat=hm, wh=wh, kps=hps, reg=reg, hm_hp=hm_hp, hp_offset=hp_offset,
                                 K=self.cfg["topk"])
        pad, gain = get_scale(self.image_size, self.origin_shape)
        # Predictions live on the down-sampled feature map; scale back up.
        bboxes = dets[:, :, :4].squeeze(0) * self.cfg["down_ratio"]
        scores = dets[:, :, 4:5].squeeze(0)
        pts = dets[:, :, 5:15].squeeze(0) * self.cfg["down_ratio"]
        pts_scores = dets[:, :, 15:20].squeeze(0)

        # Undo padding/scaling, then clip to the original image bounds.
        bboxes[:, [0, 2]] -= pad[0]  # x padding
        bboxes[:, [1, 3]] -= pad[1]  # y padding
        bboxes /= gain
        bboxes[:, 0].clamp_(0, self.origin_shape[1])  # x1
        bboxes[:, 1].clamp_(0, self.origin_shape[0])  # y1
        bboxes[:, 2].clamp_(0, self.origin_shape[1])  # x2
        bboxes[:, 3].clamp_(0, self.origin_shape[0])  # y2

        pts[:, 0::2] -= pad[0]
        pts[:, 1::2] -= pad[1]
        pts /= gain
        pts[:, 0::2].clamp_(0, self.origin_shape[1])
        pts[:, 1::2].clamp_(0, self.origin_shape[0])
        bboxes = torch.cat([bboxes, scores], dim=1).cpu().numpy()
        pts = torch.cat([pts, pts_scores], dim=1).cpu().numpy()

        return bboxes, pts

    @staticmethod
    def draw_results(img, bboxes, pts, color=(0, 0, 255)):
        """Draw boxes, score labels and landmark dots on *img* (in place).

        Args:
            img: BGR image to annotate.
            bboxes: (N, 5) array of x1, y1, x2, y2, score.
            pts: (N, 15) array of five (x, y) landmarks plus five scores.
            color: BGR color for boxes and label backgrounds.
        """
        for index, boxes in enumerate(bboxes):
            bbox = boxes[:4]
            bbox_prob = boxes[4]
            keypoints = pts[index, 0:10]
            keypoints_prob = pts[index, 10:]
            txt = '{}{:.2f}'.format('face', bbox_prob)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
            # Draw bounding box and a filled label background above it.
            bbox = np.array(bbox, dtype=np.int32)
            x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

            cv2.rectangle(img, (x1, y1 - cat_size[1] - 2), (x1 + cat_size[0], y1 - 2), color, -1)
            cv2.putText(img, txt, (x1, y1 - 2), font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)

            points = np.array(keypoints, dtype=np.int32).reshape(-1, 2)
            points_prob = np.array(keypoints_prob, dtype=np.float32).reshape(-1)

            # Landmarks are drawn unconditionally (score filter left disabled).
            for j in range(points.shape[0]):
                x, y = points[j, 0], points[j, 1]
                cv2.circle(img, (x, y), 2, (255, 255, 255), -1)

        return img

    def predict(self, img):
        """Run detection on one float BGR image.

        Returns:
            (bboxes, pts) from post_process, filtered to detections whose
            score exceeds cfg["vis_threshold"].
        """
        image = self.pre_process(image=img)
        if torch.cuda.is_available():
            image = image.to(self.device)
        self._t['forward_pass'].tic()
        with torch.no_grad():
            outputs = self.model(image)
            self._t['forward_pass'].toc()
            self._t['misc'].tic()
            bboxes, pts = self.post_process(outputs=outputs)
            self._t['misc'].toc()
            indexes = np.where(bboxes[:, 4] > self.cfg["vis_threshold"])[0]
            bboxes = bboxes[indexes, :]
            pts = pts[indexes, :]

        return bboxes, pts

    def predict_folder(self):
        """Evaluate every image listed in wider_val.txt.

        Writes one WIDER FACE-format txt per image under args.save_folder
        and, when args.save_image is set, saves annotated images.
        """
        test_data_folder = self.args.dataset_folder
        # wider_val.txt is assumed to sit beside the trailing "images/" dir
        # (the [:-7] slice strips exactly "images/") — TODO confirm layout.
        test_data_lst = self.args.dataset_folder[:-7] + "wider_val.txt"
        with open(test_data_lst, 'r') as fr:
            test_data = fr.read().split()

        num_images = len(test_data)
        for i, image_name in enumerate(test_data):
            img_raw = cv2.imread(filename=test_data_folder + image_name, flags=cv2.IMREAD_COLOR)
            img = np.float32(img_raw)
            # predict img
            bounding_boxes, key_points = self.predict(img)
            # Save detections in WIDER FACE txt format: name, count, then
            # one "x y w h score" line per box.
            save_name = self.args.save_folder + image_name[:-4] + ".txt"
            dir_name = os.path.dirname(save_name)
            if not os.path.isdir(dir_name):
                os.makedirs(dir_name)
            with open(save_name, "w") as fd:
                file_name = os.path.basename(save_name)[:-4] + "\n"
                bounding_boxes_num = str(len(bounding_boxes)) + "\n"
                fd.write(file_name)
                fd.write(bounding_boxes_num)
                for box in bounding_boxes:
                    x = int(box[0])
                    y = int(box[1])
                    w = int(box[2]) - int(box[0])
                    h = int(box[3]) - int(box[1])
                    confidence = str(box[4])
                    line = str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " + confidence + " \n"
                    fd.write(line)

            print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(
                i + 1, num_images, self._t['forward_pass'].average_time, self._t['misc'].average_time))

            # save image
            if self.args.save_image:  # fixed: read the stored namespace, not the module-global `args`
                image_save_dir = "./results/results_center_{}/".format(self.args.network)
                im = self.draw_results(img_raw, bounding_boxes, key_points)
                if not os.path.exists(image_save_dir):
                    os.makedirs(image_save_dir)
                cv2.imwrite("{}{}.jpg".format(image_save_dir, str(i)), im)


if __name__ == '__main__':

    # Inference only: disable autograd globally.
    torch.set_grad_enabled(False)

    parser = argparse.ArgumentParser(description='Centerface')
    parser.add_argument('-m', '--trained_model',
                        default='F:/gitee_project/cosmo-face/detection/weights/resnet50_center_Final.pth',
                        # default='F:/gitee_project/cosmo-face/detection/weights/RFB_center_epoch_245.pth',
                        # default='F:/gitee_project/cosmo-face/detection/weights/resnet_epoch_240.pth',
                        # default='F:/gitee_project/cosmo-face/detection/weights/ghost_0.25_centernet_Final.pth',
                        type=str, help='Trained state_dict file path to open')
    parser.add_argument('--network', default='resnet50', help='Backbone network resnet18 or ghost or RFB or resnet50')
    # NOTE(review): type=str with a bool default means any value passed on the
    # command line (even "False") is a truthy non-empty string; kept as-is
    # since this flag is not read anywhere in this script.
    parser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')
    parser.add_argument('--save_folder',
                        default='./widerface_evaluate/widerface_txt/', type=str,
                        help='Dir to save txt results')
    parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
    parser.add_argument('--dataset_folder', default='F:/gitee_project/cosmo-face/detection/data/wider_face/val/images/',
                        type=str, help='dataset path')
    parser.add_argument('--confidence_threshold', default=0.1, type=float, help='confidence_threshold')
    parser.add_argument('--top_k', default=5000, type=int, help='top_k')
    parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
    parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
    # NOTE(review): same caveat as --origin_size — any CLI value is truthy.
    parser.add_argument('-s', '--save_image', default=True, help='show detection results')
    args = parser.parse_args()

    # Map the requested backbone name to its config; unsupported names abort.
    network_cfgs = {
        "resnet18": cfg_center_resnet18,
        "resnet50": cfg_center_resnet50,
        "RFB": cfg_center_rfb,
        "ghost": cfg_center_ghost,
    }
    cfg = network_cfgs.get(args.network)
    if cfg is None:
        print("Don't support network!")
        exit(1)  # fixed: was exit(0), which reported success on this error path

    face_detection_landmark = FaceDetectionLandmark(cfg=cfg, args=args)

    # single image predict
    image_path = "./test/Aaron_Peirsol_0001.jpg"
    img_raw = cv2.imread(filename=image_path, flags=cv2.IMREAD_COLOR)
    img = np.float32(img_raw)

    bounding_boxes, key_points = face_detection_landmark.predict(img)
    print(bounding_boxes)
    print(key_points)
    # Draw on the untouched original: pre_process mean-subtracts `img` in
    # place, so drawing on it (as before) saved a corrupted visualization.
    im = face_detection_landmark.draw_results(img_raw, bounding_boxes, key_points)
    # cv2.imwrite fails silently when the target directory is missing.
    os.makedirs("./output", exist_ok=True)
    cv2.imwrite("./output/center_resnet50_test.jpg", im)

    print("***************************************************")

    # folder image predict
    face_detection_landmark.predict_folder()
