"""
@Project    : cosmo-face
@Module     : widerface_test_retina.py
@Author     : HuangJiWen[huangjiwen@haier.com]
@Created    : 2020/8/16 21:30
@Desc       : Evaluate RetinaFace-style detectors on the WIDER FACE benchmark
"""

from __future__ import print_function

import argparse
import os

import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torchvision.ops import nms

from data import cfg_retina_mobilenet, cfg_retina_slim, cfg_retina_rfb, cfg_retina_ghost, cfg_retina_resnet50
from layers.functions.prior_box import PriorBox
from models.retina_face.ghostnet_face import RetinaGhostNetFace
from models.retina_face.mobilenet_resnet50_face import RetinaMobileRes50NetFace
from models.retina_face.rfbnet_face import RetinaRFBNetFace
from models.retina_face.slimnet_face import RetinaSlimNetFace
from utils.box_utils import decode, decode_landm
from utils.timer import Timer


def check_keys(model, pretrained_state_dict):
    """Report how a checkpoint's keys overlap with the model's state dict.

    Prints the number of missing, unused and matched keys, and asserts that
    at least one checkpoint key maps onto the model.

    :param model: torch.nn.Module whose state_dict keys are expected.
    :param pretrained_state_dict: dict of parameter name -> tensor.
    :return: True (raises AssertionError if nothing matched).
    """
    checkpoint_keys = set(pretrained_state_dict)
    expected_keys = set(model.state_dict())
    matched_keys = expected_keys & checkpoint_keys

    print('Missing keys:{}'.format(len(expected_keys - checkpoint_keys)))
    print('Unused checkpoint keys:{}'.format(len(checkpoint_keys - expected_keys)))
    print('Used keys:{}'.format(len(matched_keys)))

    assert len(matched_keys) > 0, 'load NONE from pretrained checkpoint'
    return True


def remove_prefix(state_dict, prefix):
    """Strip a leading *prefix* (e.g. ``'module.'``) from every key.

    Old-style checkpoints saved from ``DataParallel`` carry a shared
    ``module.`` prefix on all parameter names; keys without the prefix are
    left untouched.

    :param state_dict: dict of parameter name -> tensor.
    :param prefix: prefix string to drop from matching keys.
    :return: a new dict with rewritten keys.
    """
    print('remove prefix \'{}\''.format(prefix))

    def strip(name):
        # Only rewrite keys that actually start with the prefix.
        return name[len(prefix):] if name.startswith(prefix) else name

    return {strip(key): value for key, value in state_dict.items()}


def load_model(model, pretrained_path, load_to_cpu):
    """Load checkpoint weights into *model* (non-strict).

    Handles both raw state dicts and checkpoints wrapped under a
    ``'state_dict'`` entry, and strips the ``DataParallel`` ``module.``
    prefix before loading.

    :param model: torch.nn.Module to receive the weights.
    :param pretrained_path: path of the saved checkpoint file.
    :param load_to_cpu: when True, map all tensors onto the CPU; otherwise
        map them onto the current CUDA device.
    :return: the same model instance with weights loaded.
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        gpu_id = torch.cuda.current_device()
        checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(gpu_id))

    weights = checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint
    weights = remove_prefix(weights, 'module.')
    check_keys(model, weights)
    model.load_state_dict(weights, strict=False)
    return model


class FaceDetectionLandmark:
    """Face detector with 5-point landmarks built on RetinaFace-style networks.

    Detections are returned as an (N, 15) float32 array per image:
    columns 0-3 are the box (x1, y1, x2, y2), column 4 the confidence and
    columns 5-14 the five landmark (x, y) pairs, in original-image scale.
    Relies on the module-level ``_t`` Timer dict created in ``__main__``.
    """

    def __init__(self, cfg, args):
        """Build the backbone selected by ``cfg['name']`` and load its weights.

        :param cfg: network config dict (uses 'name', 'cpu', 'variance',
            'vis_threshold', plus whatever PriorBox reads).
        :param args: parsed CLI namespace (trained_model, thresholds, paths).
        """
        self.cfg = cfg
        self.args = args
        self.device = torch.device("cpu" if self.cfg["cpu"] else "cuda")

        if self.cfg["name"] == "mobilenet0.25" or self.cfg["name"] == "resnet50":
            self.model = RetinaMobileRes50NetFace(cfg=self.cfg, phase='test')
        elif self.cfg["name"] == "ghost":
            self.model = RetinaGhostNetFace(cfg=self.cfg, phase='test')
        elif self.cfg["name"] == "slim":
            self.model = RetinaSlimNetFace(cfg=self.cfg, phase='test')
        elif self.cfg["name"] == "RFB":
            self.model = RetinaRFBNetFace(cfg=self.cfg, phase='test')
        else:
            print("Don't support network!")
            exit(1)  # BUG FIX: was exit(0), which reported success on failure

        self.model = load_model(self.model, self.args.trained_model, self.cfg["cpu"])
        self.model.eval()
        if torch.cuda.is_available():
            self.model = self.model.to(self.device)
            cudnn.benchmark = True

    def pre_process(self, img):
        """Resize + mean-subtract an image and pack it as an NCHW tensor.

        :param img: HxWx3 float image (BGR; callers pass np.float32(...) —
            the mean subtraction mutates this array in place).
        :return: (tensor, resized_height, resized_width, resize_factor).
        """
        # Scale so the short side reaches long_side, unless that would push
        # the long side past long_side too — then fit the long side instead.
        target_size = self.args.long_side
        max_size = self.args.long_side

        im_shape = img.shape
        im_size_min = np.min(im_shape[0:2])
        im_size_max = np.max(im_shape[0:2])
        resize = float(target_size) / float(im_size_min)
        if np.round(resize * im_size_max) > max_size:
            resize = float(max_size) / float(im_size_max)

        if self.args.origin_size:
            resize = 1

        if resize != 1:
            img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)

        img_height, img_width, _ = img.shape

        img -= (104, 117, 123)  # per-channel BGR means
        img = img.transpose(2, 0, 1)  # HWC -> CHW
        img = torch.from_numpy(img).unsqueeze(0)  # add batch dimension

        return img, img_height, img_width, resize

    def post_process(self, outputs, img, img_height, img_width, resize):
        """Decode raw network outputs into final detections.

        :param outputs: (loc, conf, landmarks) tensors from the model.
        :param img: the NCHW input tensor (used only for its spatial size).
        :param img_height: height of the resized network input.
        :param img_width: width of the resized network input.
        :param resize: factor applied in pre_process; divided out here so
            boxes and landmarks are in original-image coordinates.
        :return: (N, 15) float32 array [x1, y1, x2, y2, score, 5*(x, y)].
        """
        loc, conf, landmarks = outputs

        prior_box = PriorBox(self.cfg, image_size=(img_height, img_width))
        priors = prior_box.forward()
        scale = torch.Tensor([img_width, img_height, img_width, img_height])
        if torch.cuda.is_available():
            priors = priors.to(self.device)
            scale = scale.to(self.device)
        prior_data = priors.data
        boxes = decode(loc=loc.data.squeeze(0), priors=prior_data, variances=self.cfg['variance'])
        boxes = boxes * scale / resize

        landmarks = decode_landm(pre=landmarks.data.squeeze(0), priors=prior_data, variances=self.cfg['variance'])
        # Five (x, y) landmark pairs, so width/height alternate ten times.
        scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2]])
        if torch.cuda.is_available():
            scale1 = scale1.to(self.device)
        landmarks = landmarks * scale1 / resize

        # Keep only detections above the confidence threshold (column 1 is
        # the face-class score).
        scores = conf.squeeze(0)[:, 1]
        indexes = scores > self.args.confidence_threshold
        scores = scores[indexes]
        boxes = boxes[indexes, :]
        landmarks = landmarks[indexes, :]

        # Sort by score (ascending); torchvision's nms re-sorts internally,
        # so this ordering does not change which boxes survive.
        # NOTE(review): args.top_k / args.keep_top_k are parsed but never
        # applied here — confirm whether pre/post-NMS capping was intended.
        indexes = scores.sort()[1]
        scores = scores[indexes]
        boxes = boxes[indexes, :]
        landmarks = landmarks[indexes, :]

        keep = nms(boxes=boxes, scores=scores, iou_threshold=self.args.nms_threshold)
        boxes = boxes[keep, :]
        scores = scores[keep]
        landmarks = landmarks[keep, :]

        boxes = boxes.cpu().numpy()
        scores = scores.cpu().numpy()
        landmarks = landmarks.cpu().numpy()

        detects = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        detects = np.concatenate((detects, landmarks), axis=1)
        # BUG FIX: removed the duplicate _t['misc'].toc() that used to sit
        # here — predict() already calls toc() right after this method
        # returns, so the misc timer was stopped twice per frame and its
        # reported average was corrupted.

        return detects

    def draw_results(self, origin_img, detects):
        """Draw boxes, scores and colored landmark dots on *origin_img* in place.

        Detections scoring below cfg['vis_threshold'] are skipped.

        :param origin_img: BGR uint8 image to annotate (modified in place).
        :param detects: (N, 15) detection array from predict().
        :return: the annotated image (same array object as origin_img).
        """
        for det in detects:
            if det[4] < self.cfg["vis_threshold"]:
                continue
            text = "{:.4f}".format(det[4])
            det = list(map(int, det))
            cv2.rectangle(img=origin_img, pt1=(det[0], det[1]), pt2=(det[2], det[3]), color=(0, 0, 255), thickness=2)
            cx, cy = det[0], det[1] + 12
            cv2.putText(img=origin_img, text=text, org=(cx, cy), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.5,
                        color=(255, 255, 255))
            # One distinctly-colored dot per landmark point.
            cv2.circle(img=origin_img, center=(det[5], det[6]), radius=1, color=(0, 0, 255), thickness=4)
            cv2.circle(img=origin_img, center=(det[7], det[8]), radius=1, color=(0, 255, 255), thickness=4)
            cv2.circle(img=origin_img, center=(det[9], det[10]), radius=1, color=(255, 0, 255), thickness=4)
            cv2.circle(img=origin_img, center=(det[11], det[12]), radius=1, color=(0, 255, 0), thickness=4)
            cv2.circle(img=origin_img, center=(det[13], det[14]), radius=1, color=(255, 0, 0), thickness=4)

        return origin_img

    def predict(self, image):
        """Run the full pipeline on one float BGR image.

        :param image: HxWx3 float image (see pre_process).
        :return: (N, 15) float32 detection array.
        """
        img, img_height, img_width, resize = self.pre_process(img=image)

        if torch.cuda.is_available():
            img = img.to(self.device)

        _t['forward_pass'].tic()  # timers are module-level, set up in __main__

        with torch.no_grad():
            outputs = self.model(img)
            _t['forward_pass'].toc()
            _t['misc'].tic()
            detects = self.post_process(
                outputs=outputs, img=img, img_height=img_height, img_width=img_width, resize=resize)
            _t['misc'].toc()

        return detects

    def predict_folder(self):
        """Run detection over the WIDER FACE val list and dump per-image txt.

        Reads image names from ``wider_val.txt`` (located by stripping the
        trailing ``images/`` — 7 chars — off dataset_folder; assumes that
        layout), writes one WIDER-format txt per image under save_folder,
        and optionally saves annotated images.
        """
        test_folder = self.args.dataset_folder
        test_list = self.args.dataset_folder[:-7] + "wider_val.txt"

        with open(test_list, 'r') as fr:
            test_set = fr.read().split()
        num_images = len(test_set)

        for i, img_name in enumerate(test_set):
            origin_img = cv2.imread(filename=test_folder + img_name, flags=cv2.IMREAD_COLOR)
            img = np.float32(origin_img)
            # predict img
            detections = self.predict(image=img)
            # Save results in the WIDER FACE evaluation format:
            # filename, count, then one "x y w h score" line per detection.
            # NOTE(review): the [:-4] slices assume 3-letter extensions
            # (.jpg) — confirm against the dataset file naming.
            save_name = self.args.save_folder + img_name[:-4] + ".txt"
            dir_name = os.path.dirname(save_name)
            if not os.path.isdir(dir_name):
                os.makedirs(dir_name)
            with open(save_name, "w") as fd:
                bounding_boxes = detections
                file_name = os.path.basename(save_name)[:-4] + "\n"
                bounding_boxes_num = str(len(bounding_boxes)) + "\n"
                fd.write(file_name)
                fd.write(bounding_boxes_num)
                for box in bounding_boxes:
                    x = int(box[0])
                    y = int(box[1])
                    w = int(box[2]) - int(box[0])
                    h = int(box[3]) - int(box[1])
                    confidence = str(box[4])
                    line = str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " + confidence + " \n"
                    fd.write(line)
            print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(
                i + 1, num_images, _t["forward_pass"].average_time, _t["misc"].average_time))

            if self.args.save_image:
                image_save_dir = "./results/results_retina_{}/".format(self.args.network)
                origin_img = self.draw_results(origin_img=origin_img, detects=detections)
                if not os.path.exists(image_save_dir):
                    os.makedirs(image_save_dir)
                cv2.imwrite("{}{}.jpg".format(image_save_dir, str(i)), origin_img)


if __name__ == "__main__":

    torch.set_grad_enabled(False)

    def _str2bool(value):
        """Parse a CLI flag into a real bool.

        BUG FIX: the previous type=str made every non-empty value truthy,
        so `--origin_size False` silently enabled origin-size evaluation.
        """
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() in ("true", "1", "yes", "y")

    parser = argparse.ArgumentParser(description='Retinaface')
    parser.add_argument('-m', '--trained_model',
                        default="F:/gitee_project/cosmo-face/detection/weights/Resnet50_retina_Final.pth",
                        type=str, help='Trained state_dict file path to open')
    parser.add_argument('--network', default='resnet50',
                        help='Backbone network mobilenet0.25 or slim or RFB or ghost or resnet50')
    parser.add_argument('--origin_size', default=False, type=_str2bool,
                        help='Whether use origin image size to evaluate')
    # BUG FIX: added type=int — without it a CLI-supplied value stayed a str.
    parser.add_argument('--long_side', default=840, type=int,
                        help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')
    parser.add_argument('--save_folder',
                        default='F:/gitee_project/cosmo-face/detection/widerface_evaluate/widerface_txt_retina/',
                        type=str, help='Dir to save txt results')
    parser.add_argument('--dataset_folder',
                        default='F:/gitee_project/cosmo-face/detection/data/wider_face/val/images/',
                        type=str, help='dataset path')
    parser.add_argument('--confidence_threshold', default=0.4, type=float, help='confidence_threshold')
    parser.add_argument('--top_k', default=5000, type=int, help='top_k')
    parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
    parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
    parser.add_argument('-s', '--save_image', default=True, type=_str2bool, help='show detection results')
    args = parser.parse_args()

    # Module-level timers read by FaceDetectionLandmark.predict/predict_folder.
    _t = {'forward_pass': Timer(), 'misc': Timer()}

    # Map the CLI network name onto its config; fail fast on unknown names.
    network_configs = {
        "mobilenet0.25": cfg_retina_mobilenet,
        "resnet50": cfg_retina_resnet50,
        "slim": cfg_retina_slim,
        "RFB": cfg_retina_rfb,
        "ghost": cfg_retina_ghost,
    }
    cfg = network_configs.get(args.network)
    if cfg is None:
        print("Don't support network!")
        exit(1)  # BUG FIX: was exit(0), which reported success on failure

    face_detection_landmark = FaceDetectionLandmark(cfg=cfg, args=args)

    # single image predict
    test_image_path = "F:/gitee_project/cosmo-face/detection/data/image_test/392.jpg"
    img_raw = cv2.imread(test_image_path, cv2.IMREAD_COLOR)
    img = np.float32(img_raw)
    detection1 = face_detection_landmark.predict(image=img)
    print(detection1)
    im = face_detection_landmark.draw_results(origin_img=img_raw, detects=detection1)
    # BUG FIX: cv2.imwrite fails silently when the target dir is missing.
    os.makedirs("./output", exist_ok=True)
    cv2.imwrite("./output/retina_rfb_test.jpg", im)

    print("***************************************************")

    # folder image predict
    face_detection_landmark.predict_folder()
