# -*-coding: utf-8 -*-
"""
    @Author : panjq
    @E-mail : pan_jinquan@163.com
    @Date   : 2021-08-24 10:35:47
"""
import argparse
import os
import cv2
import torch
import numpy as np
from detection.models import build_models
from detection.models.yolo.yolox.utils import postprocess
from detection.transforms.build_transform import BoxesTransform
from pybaseutils import image_utils, setup_config, file_utils
from basetrainer.utils import torch_tools


class YOLOInference(object):
    """YOLO(X) detector wrapper: builds the network, loads trained weights and
    runs single-image (or small-batch) inference.

    Detections are returned as numpy arrays of shape (N, 6):
    (xmin, ymin, xmax, ymax, score, label), with box coordinates mapped back
    to the original image size.
    """

    def __init__(self, cfg, padding=False, freeze_header=True, device="cuda:0"):
        """
        :param cfg: config namespace; must provide class_name, model_file,
                    width_mult, net_type, prob_threshold, iou_threshold, input_size
        :param padding: if True, pre-processing pads the image (aspect-ratio kept)
                        and post-processing inverts that padding
        :param freeze_header: unused here; kept for interface compatibility
        :param device: torch device string, e.g. "cuda:0" or "cpu"
        """
        self.class_name, self.class_dict = file_utils.parser_classes(cfg.class_name)
        self.num_classes = len(self.class_name)
        self.padding = padding
        self.model_file = cfg.model_file
        self.width_mult = cfg.width_mult
        self.device = device
        self.model = self.build_model(cfg.net_type)
        self.model = self.load_model(self.model, self.model_file)
        self.prob_threshold = cfg.prob_threshold
        self.iou_threshold = cfg.iou_threshold
        self.input_size = cfg.input_size
        self.transforms = BoxesTransform(self.input_size, padding=padding, norm=True, trans_type="TestYolo")

    def build_model(self, net_type):
        """Build the detection network in test phase (no training heads)."""
        model = build_models.build_net_v3(net_type,
                                          self.num_classes,
                                          width_mult=self.width_mult,
                                          phase='test',
                                          device=self.device)
        return model

    def load_model(self, model, model_file):
        """Load weights from ``model_file``, move the model to the target device
        and switch to eval mode.

        :param model: network returned by :meth:`build_model`
        :param model_file: path to a checkpoint readable by torch_tools
        :return: the model, ready for inference
        """
        state_dict = torch_tools.load_state_dict(model_file)
        model.load_state_dict(state_dict)
        model = model.to(self.device)
        model.eval()  # disable dropout / freeze batch-norm statistics
        return model

    def pre_process(self, image, input_size):
        """Convert one RGB image (or a list of images) into a batched float tensor.

        :param image: np.ndarray (H, W, 3) or a list of such arrays
        :param input_size: unused; the transform size was fixed at construction time
        :return: torch.FloatTensor of shape (B, C, H, W)
        """
        if isinstance(image, np.ndarray):
            image = [image]
        image_tensor = []
        for img in image:
            img, _, _ = self.transforms(img, [], [])
            img = img.unsqueeze(0)
            image_tensor.append(img)
        image_tensor = torch.cat(image_tensor)
        # BUGFIX: torch.tensor(existing_tensor, ...) emits a UserWarning and
        # makes a needless copy; a dtype cast is the correct operation here.
        image_tensor = image_tensor.float()
        return image_tensor

    def inference(self, input_tensor):
        """Forward pass only, gradients disabled.

        :param input_tensor: batched image tensor from :meth:`pre_process`
        :return: raw model output (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
        """
        with torch.no_grad():
            input_tensor = input_tensor.to(self.device)
            output = self.model(input_tensor)
        return output

    def pose_process(self, outputs, image_sizes):
        """Apply NMS and rescale detections back to original image coordinates.
        (Method name kept for backward compatibility; this is the post-process step.)

        :param outputs: raw model outputs (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
        :param image_sizes: list of (width, height) of the original images
        :return: list with one entry per image, each an np.ndarray of shape (N, 6):
                 (xmin, ymin, xmax, ymax, score, label); empty list when no detection
        """
        # torchvision::nms may be unavailable on the CUDA backend; run NMS on CPU.
        outputs = outputs.cpu()
        outputs = postprocess(outputs, self.num_classes, self.prob_threshold, self.iou_threshold)
        batch_dets = []
        for i in range(len(outputs)):
            output = outputs[i]
            image_size = image_sizes[i]
            dets = []
            if output is not None:
                # (x1, y1, x2, y2, obj_conf, class_conf, label) ==> (xmin, ymin, xmax, ymax, score, label)
                output = output.cpu().numpy()
                dets = np.zeros(shape=(len(output), 6), dtype=np.float32)
                dets[:, 5] = output[:, 6]
                dets[:, 4] = output[:, 4] * output[:, 5]  # score = obj_conf * class_conf
                if self.padding:
                    dets[:, 0:4] = output[:, 0:4]
                    # NOTE(review): return value is ignored, so this helper is
                    # assumed to modify the dets[:, 0:4] slice in place — verify.
                    image_utils.image_boxes_resize_padding_inverse(image_size, self.input_size, dets[:, 0:4])
                else:
                    bboxes_scale = [self.input_size[0] / image_size[0], self.input_size[1] / image_size[1]] * 2
                    dets[:, 0:4] = output[:, 0:4] / bboxes_scale
            batch_dets.append(dets)
        return batch_dets

    def detect(self, rgb_image, vis=False):
        """Detect objects in one RGB image.

        :param rgb_image: np.ndarray (H, W, 3), RGB channel order
        :param vis: show the detections in a window when True
        :return: np.ndarray (N, 6): (xmin, ymin, xmax, ymax, score, label)
        """
        shape = rgb_image.shape
        image_sizes = [[shape[1], shape[0]]]  # (width, height)
        input_tensor = self.pre_process(rgb_image.copy(), self.input_size)
        outputs = self.inference(input_tensor)
        batch_dets = self.pose_process(outputs, image_sizes)
        dets = batch_dets[0]
        if vis:
            self.show_image(rgb_image, dets)
        return dets

    def label2class_name(self, pred_index):
        """Map numeric label indices to class-name strings (no-op when no names)."""
        if self.class_name:
            pred_index = np.reshape(pred_index, newshape=(-1,))
            pred_index = [self.class_name[int(i)] for i in pred_index]
        return pred_index

    def detect_image_dir(self, image_dir, isshow=True):
        """Run detection on every image in a directory (or on a single file).

        :param image_dir: directory or image file path
        :param isshow: <bool> display each result in a window
        :return:
        """
        image_list = file_utils.get_files_lists(image_dir)
        for img_path in image_list:
            print("img_path:{}".format(img_path))
            image = cv2.imread(img_path)
            # BUGFIX: cv2.imread returns None for unreadable/corrupt files;
            # skip instead of crashing in cvtColor.
            if image is None:
                print("bad image, skipped:{}".format(img_path))
                continue
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            self.detect(image, vis=isshow)

    def show_image(self, image, dets, landms=None, waitKey=0):
        """Draw detections (and optional landmarks) on the image and display it.

        :param image: np.ndarray (H, W, 3)
        :param dets: np.ndarray (N, 6): (xmin, ymin, xmax, ymax, score, label)
        :param landms: optional landmarks to draw
        :param waitKey: cv2 waitKey delay (0 = block until keypress)
        :return: the annotated image
        """
        if landms is not None and len(landms) > 0:
            image = image_utils.draw_landmark(image, landms, vis_id=False)
        print("image:{}".format(image.shape))
        print("dets:{}".format(dets))
        print("landms:{}".format(landms))
        if len(dets) > 0:
            bboxes = dets[:, 0:4]
            scores = dets[:, 4:5]
            labels = dets[:, 5:6]
            image = image_utils.draw_image_detection_bboxes(image, bboxes, scores, labels, class_name=self.class_name)
        image_utils.cv_show_image("image", image, waitKey=waitKey)
        return image


def get_parser():
    """Build the command-line argument parser for the detection test script.

    :return: argparse.ArgumentParser with model/detection options and defaults.
    """
    width_mult = 1.0
    net_type = "yolox-nano"
    input_size = [416, 416]
    image_dir = "/home/dm/data3/dataset/face_person/COCO/val2017/images"
    model_file = "/home/dm/data3/release/infrastructure/DMDetection/work_space/COCO/yolox-nano_1.0_yolox_416_416_20210918152451/model/best_model_173_0.3653.pth"
    video_path = None
    class_name = "/home/dm/data3/release/infrastructure/DMDetection/data/class_name/coco_class_name80.txt"
    parser = argparse.ArgumentParser(description='Detection Test')
    parser.add_argument("-c", "--config_file", help="configs file", default=None, type=str)
    parser.add_argument('-m', '--model_file', type=str, default=model_file, help='model file path')
    parser.add_argument('--net_type', type=str, default=net_type, help='backbone network type, e.g. yolox-nano')
    # BUGFIX: help text was copied from --net_type and described backbones instead
    # of the width multiplier.
    parser.add_argument('--width_mult', type=float, default=width_mult,
                        help='network width multiplier')
    parser.add_argument('--prob_threshold', type=float, default=0.5, help='confidence_threshold')
    parser.add_argument('--iou_threshold', type=float, default=0.3, help='iou_threshold')
    parser.add_argument('--image_dir', type=str, default=image_dir, help='directory or image path')
    # BUGFIX: help text wrongly said "directory or image path" for a video file.
    parser.add_argument('--video_path', type=str, default=video_path, help='video file path')
    parser.add_argument('--input_size', nargs='+', help="--input size [600(W),600(H)]", type=int, default=input_size)
    parser.add_argument("--class_name", help="int,list,dict or file", default=class_name)
    parser.add_argument('--device', type=str, default="cuda:0", help='device')
    return parser


if __name__ == '__main__':
    # Parse CLI options, build the detector and run it over the image directory.
    args = get_parser().parse_args()
    cfg = setup_config.parser_config(args, cfg_updata=False)
    detector = YOLOInference(cfg, padding=False, device=cfg.device)
    detector.detect_image_dir(cfg.image_dir, isshow=True)
