# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author : panjq
# @E-mail : pan_jinquan@163.com
# @Date   : 2020-04-03 18:38:34
# --------------------------------------------------------
"""
import os, sys

sys.path.append(os.getcwd())
import cv2
import argparse
import numpy as np
import torch
from detection.models import build_models
from detection.anchor_utils.prior_box import PriorBox
from detection.anchor_utils import anchor_utils
from detection.anchor_utils.nms import py_cpu_nms
from pybaseutils import image_utils, setup_config, file_utils
from detection.transforms.build_transform import BoxesTransform
from basetrainer.utils import torch_tools


class SSDInference(object):
    """SSD-style object detector inference wrapper.

    Builds the network and its prior (anchor) boxes, loads pretrained
    weights, and runs pre-process -> forward -> NMS post-process, with
    simple visualization helpers.
    """

    def __init__(self, cfg, padding=False, freeze_header=True, device="cuda:0"):
        """
        :param cfg: config namespace providing net_type, priors_type,
                    prob_threshold, iou_threshold, model_file, width_mult
                    and input_size ([W, H])
        :param padding: if True, resize with padding so the aspect ratio is kept
        :param freeze_header: forwarded to PriorBox/model construction
        :param device: torch device string, e.g. "cuda:0" or "cpu"
        """
        self.device = device
        self.padding = padding
        self.cfg = cfg
        self.net_type = cfg.net_type
        self.priors_type = cfg.priors_type
        self.prob_threshold = cfg.prob_threshold
        self.iou_threshold = cfg.iou_threshold
        self.model_file = cfg.model_file
        self.width_mult = cfg.width_mult
        self.top_k = 5000  # max candidates considered per image before NMS
        self.keep_top_k = 750  # max detections kept per image after NMS
        self.input_size = cfg.input_size
        self.freeze_header = freeze_header
        self.model, self.prior_boxes = self.build_model(self.net_type, self.priors_type)
        self.class_name = self.prior_boxes.class_name
        self.priors_cfg = self.prior_boxes.get_prior_cfg()
        self.priors = self.prior_boxes.priors.to(self.device)
        self.model = self.load_model(self.model, self.model_file)
        self.rgb_mean = self.priors_cfg.rgb_mean
        self.rgb_std = self.priors_cfg.rgb_std
        self.transforms = BoxesTransform(cfg.input_size, mean=self.rgb_mean, std=self.rgb_std,
                                         padding=padding, trans_type="test")
        print('Finished loading model!')

    def build_model(self, net_type, priors_type, version="v2"):
        """Build the SSD model and its prior (anchor) boxes.

        :param net_type: backbone type, e.g. "rfb"
        :param priors_type: anchor configuration name
        :param version: kept for backward compatibility (currently unused)
        :return: (model, priorbox); the model is moved to self.device
        """
        priorbox = PriorBox(self.cfg, input_size=self.input_size,
                            priors_type=priors_type, freeze_header=self.freeze_header)
        # Fix: honor the `net_type` argument instead of silently reading
        # self.net_type (identical at the single call site, but the
        # parameter was previously ignored).
        model = build_models.build_ssd_model_v2(net_type,
                                                priorbox,
                                                width_mult=self.width_mult,
                                                pretrained=False,
                                                phase='test',
                                                device=self.device)
        model = model.to(self.device)
        return model, priorbox

    def load_model(self, model, model_file):
        """Load checkpoint weights into ``model`` and switch to eval mode.

        Legacy checkpoint module names are renamed to the current module
        names before loading.

        :param model: freshly built network
        :param model_file: path to the checkpoint file
        :return: model with weights loaded, on self.device, in eval mode
        """
        # old checkpoint module name -> current module name
        name_map = {
            "base_net": "backbone",
            "extras": "extra_layer",
            "classification_headers": "class_headers",
            "regression_headers": "bbox_headers",
            "landms_headers": "landm_headers",
        }
        state_dict = torch_tools.load_state_dict(model_file)
        state_dict = torch_tools.rename_module(state_dict, name_map=name_map)
        model.load_state_dict(state_dict)
        model = model.to(self.device)
        model.eval()
        return model

    def pre_process(self, image, input_size):
        """Transform one RGB image (or a list of images) into a batched
        input tensor via self.transforms.

        :param image: np.ndarray HWC image, or a list of such images
        :param input_size: unused here (self.transforms already carries the
                           configured input size); kept for API compatibility
        :return: float tensor of shape [batch, C, H, W]
        """
        if isinstance(image, np.ndarray):
            image = [image]
        image_tensor = []
        for img in image:
            img, _, _ = self.transforms(img, [], [])
            img = img.unsqueeze(0)  # add batch dimension
            image_tensor.append(img)
        image_tensor = torch.cat(image_tensor)
        return image_tensor

    def pre_process_v2(self, image, input_size):
        """Numpy-only alternative to pre_process: resize (optionally with
        padding), normalize by rgb_mean/rgb_std, and batch.

        :param image: np.ndarray HWC image, or a list of such images
        :param input_size: model input size [W, H]
        :return: float tensor of shape [batch, C, H, W]
        """
        if isinstance(image, np.ndarray):
            image = [image]
        image_tensor = []
        for img in image:
            if self.padding:
                out_img = image_utils.image_boxes_resize_padding(img, input_size, None)
            else:
                out_img = image_utils.resize_image(img, size=input_size)
            out_img = np.float32(out_img)
            out_img -= self.rgb_mean
            out_img /= self.rgb_std
            out_img = out_img.transpose(2, 0, 1)  # HWC -> CHW
            out_img = out_img[np.newaxis, :]
            image_tensor.append(out_img)
        image_tensor = np.concatenate(image_tensor, axis=0)
        image_tensor = torch.from_numpy(image_tensor)
        return image_tensor

    def pose_process(self, outputs, image_sizes):
        """Post-process raw model outputs into per-image detections.
        (NOTE: name kept for backward compatibility; presumably a typo of
        "post_process".)

        :param outputs: (bboxes, scores) from the model forward pass
        :param image_sizes: list of original image sizes [W, H], one per batch item
        :return: list of np.ndarray per image, each row is
                 (xmin, ymin, xmax, ymax, score, label)
        """
        bboxes, scores = outputs
        if not self.prior_boxes.freeze_header:
            # the model emitted raw regression offsets; decode against priors
            variances = [self.prior_boxes.center_variance, self.prior_boxes.size_variance]
            bboxes = anchor_utils.decode(bboxes, self.priors, variances)
        bboxes = bboxes.cpu().numpy()
        scores = scores.cpu().numpy()
        batch_dets = []
        for i in range(len(bboxes)):
            image_size = image_sizes[i]
            dets, labels = py_cpu_nms.bboxes_nms(bboxes[i], scores[i],
                                                 prob_threshold=self.prob_threshold,
                                                 iou_threshold=self.iou_threshold,
                                                 top_k=self.top_k,
                                                 start=1,  # skip background class 0
                                                 keep_top_k=self.keep_top_k)
            dets = dets.reshape(len(dets), 5)  # keep (0, 5) shape when empty
            if len(dets) == 0:
                batch_dets.append(dets)
                continue
            if self.padding:
                # boxes are normalized to the padded model input: scale by
                # [W, H, W, H], then map back to the original image coords
                bboxes_scale = np.asarray(self.input_size * 2, dtype=np.float32)
                dets[:, 0:4] = dets[:, 0:4] * bboxes_scale
                # NOTE(review): return value is discarded — presumably the
                # inverse mapping modifies dets[:, 0:4] in place; confirm.
                image_utils.image_boxes_resize_padding_inverse(image_size, self.input_size, dets[:, 0:4])
            else:
                bboxes_scale = np.asarray(image_size * 2, dtype=np.float32)
                dets[:, 0:4] = dets[:, 0:4] * bboxes_scale
            dets = np.concatenate([dets, labels.reshape(-1, 1)], axis=1)  # (xmin,ymin,xmax,ymax,score,label)
            batch_dets.append(dets)
        return batch_dets

    def inference(self, input_tensor):
        """Run a forward pass without gradient tracking.

        :param input_tensor: batched input tensor [B, C, H, W]
        :return: raw model outputs (loc, conf -> boxes, scores)
        """
        with torch.no_grad():
            input_tensor = input_tensor.to(self.device)
            output = self.model(input_tensor)
        return output

    def detect(self, rgb_image, vis=False):
        """Detect objects in a single RGB image.

        :param rgb_image: np.ndarray RGB image of shape (H, W, C)
        :param vis: if True, display the detections
        :return: np.ndarray dets, each row (xmin, ymin, xmax, ymax, score, label)
        """
        shape = rgb_image.shape
        image_sizes = [[shape[1], shape[0]]]  # [W, H]
        input_tensor = self.pre_process(rgb_image, input_size=self.input_size)
        outputs = self.inference(input_tensor)
        batch_dets = self.pose_process(outputs, image_sizes=image_sizes)
        dets = batch_dets[0]  # single-image batch
        if vis:
            self.show_image(rgb_image, dets)
        return dets

    def label2class_name(self, pred_index):
        """Map integer labels to class-name strings.

        No-op when self.class_name is empty/None.

        :param pred_index: array-like of integer class indices
        :return: list of class names, or the input unchanged
        """
        if self.class_name:
            pred_index = np.reshape(pred_index, newshape=(-1,))
            pred_index = [self.class_name[int(i)] for i in pred_index]
        return pred_index

    def detect_image_dir(self, image_dir, vis=True):
        """Run detection on every image found under a path.

        :param image_dir: directory or single image file path
        :param vis: <bool> show each result window
        :return: None
        """
        image_list = file_utils.get_files_lists(image_dir)
        for img_path in image_list:
            print("img_path:{}".format(img_path))
            image = cv2.imread(img_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # model expects RGB
            self.detect(image, vis=vis)

    def show_image(self, image, dets, landms=None, delay=0):
        """Draw detections (and optional landmarks) and display the image.

        :param image: RGB image
        :param dets: np.ndarray, rows of (xmin, ymin, xmax, ymax, score, label)
        :param landms: optional landmarks to draw
        :param delay: cv2 waitKey delay in ms (0 = block until key press)
        :return: the annotated image
        """
        if landms is not None and len(landms) > 0:
            image = image_utils.draw_landmark(image, landms, vis_id=False)
        print("image:{}".format(image.shape))
        print("dets:{}".format(dets))
        print("landms:{}".format(landms))
        if len(dets) > 0:
            bboxes = dets[:, 0:4]
            scores = dets[:, 4:5]
            labels = dets[:, 5:6]
            image = image_utils.draw_image_detection_bboxes(image, bboxes, scores, labels, class_name=self.class_name)
        image_utils.cv_show_image("image", image, delay=delay, use_rgb=True)
        return image


def get_parser():
    """Build the command-line argument parser for the detection test script.

    :return: argparse.ArgumentParser with model and detection options
    """
    input_size = [320, 160]  # model input size [W, H]
    image_dir = "/home/dm/nasdata/dataset/new_retails/VOC/202107_val/"
    model_file = "../data/retails/RFB_default_320_160_4.0439.pth"
    net_type = "rfb"
    priors_type = "default"
    class_name = 2
    parser = argparse.ArgumentParser(description='Detection Test')
    parser.add_argument('-m', '--model_file', type=str, default=model_file, help='model file path')
    parser.add_argument('--net_type', type=str, default=net_type, help='Backbone network mobile0.25 or slim or RFB')
    # Fix: the next two options carried copy-pasted help text from --net_type.
    parser.add_argument('--width_mult', type=float, default=1.0, help='backbone width multiplier')
    parser.add_argument('--priors_type', type=str, default=priors_type,
                        help='prior (anchor) boxes configuration name')
    parser.add_argument('--prob_threshold', type=float, default=0.5, help='confidence_threshold')
    parser.add_argument('--iou_threshold', type=float, default=0.3, help='iou_threshold')
    parser.add_argument('--image_dir', type=str, default=image_dir, help='directory or image path')
    parser.add_argument('--input_size', nargs='+', help="--input size [600(W),600(H)]", type=int, default=input_size)
    parser.add_argument("--class_name", help="int,list,dict or file", default=class_name)
    parser.add_argument('--device', type=str, default="cuda:0", help='device')
    return parser


if __name__ == '__main__':
    # Parse CLI options, build the detector, and run it over the image folder.
    arg_parser = get_parser()
    config = setup_config.parser_config(arg_parser.parse_args(), cfg_updata=False)
    detector = SSDInference(config, device=config.device)
    detector.detect_image_dir(config.image_dir, vis=True)
