import os, sys

# Make the project root importable: climbing 7 levels from __file__ (the file
# itself plus 6 package directories, matching the depth of the
# core.algorithm.deeplearning.paddle.detectors.det package) reaches the
# directory that contains `core`, so the absolute imports below resolve.
parent_path = os.path.abspath(os.path.join(__file__, *([".."] * 7)))
sys.path.insert(0, parent_path)

import numpy as np
from pathlib import Path
import yaml, math
import sahi
from sahi.slicing import slice_image

from core.algorithm.deeplearning.paddle.detectors.processors import (
    preprocess,
    decode_image,
    Resize,
    NormalizeImage,
    Permute,
    PadStride,
    LetterBoxResize,
    WarpAffine,
    Pad,
)
from core.algorithm.deeplearning.paddle.detectors.det.utils import (
    load_predictor,
    create_inputs,
    create_inputs_lite,
    multiclass_nms,
)

# Model architectures currently supported. Matching is done by substring
# against the "arch" field of infer_cfg.yml (see PredictConfig.check_model),
# so e.g. arch "PPYOLOE" matches "YOLO".
SUPPORT_MODELS = {
    "YOLO",
    "RCNN",
    "SSD",
    "Face",
    "FCOS",
    "TTFNet",
    "S2ANet",
    "JDE",
    "FairMOT",
    "DeepSORT",
    "GFL",
    "CenterNet",
    "TOOD",
    "RetinaNet",
    "StrongBaseline",
    "STGCN",
    "YOLOX",
    "PPHGNet",
    "PPLCNet",
}


# Generic object-detection model runner
class Detector(object):
    """Run a Paddle detection model end to end: preprocess -> infer -> postprocess.

    Args:
        model_dir (str): directory holding the exported model and infer_cfg.yml.
        keys (dict): {"in": <key>, "out": <key>} — where to read images from and
            write results into the data dict passed to ``__call__``.
        device (str): target device string forwarded to load_predictor.
        run_mode (str): "paddle", "lite", or a TensorRT mode string.
        batch_size (int): inference batch size.
        trt_min_shape / trt_max_shape / trt_opt_shape (int): TensorRT dynamic
            shape hints.
        trt_calib_mode (bool): TensorRT INT8 calibration switch.
        cpu_threads (int): CPU math-library thread count.
        enable_mkldnn / enable_mkldnn_bfloat16 (bool): MKL-DNN switches.
        threshold (float): score threshold used to filter final boxes.
        delete_shuffle_pass (bool): forwarded to load_predictor.
    """

    def __init__(
        self,
        model_dir,
        keys,
        device="CPU",
        run_mode="paddle",
        batch_size=1,
        trt_min_shape=1,
        trt_max_shape=1280,
        trt_opt_shape=640,
        trt_calib_mode=False,
        cpu_threads=1,
        enable_mkldnn=False,
        enable_mkldnn_bfloat16=False,
        threshold=0.5,
        delete_shuffle_pass=False,
    ):
        self.keys = keys
        # Parse the infer_cfg.yml file found under model_dir.
        self.pred_config = self.set_config(model_dir)
        # Build the preprocessing pipeline described by pred_config.
        self.transformer = self.build_transformer()
        # Build the inference predictor.
        self.predictor, self.config = load_predictor(
            model_dir,
            run_mode=run_mode,
            batch_size=batch_size,
            min_subgraph_size=self.pred_config.min_subgraph_size,
            device=device,
            use_dynamic_shape=self.pred_config.use_dynamic_shape,
            trt_min_shape=trt_min_shape,
            trt_max_shape=trt_max_shape,
            trt_opt_shape=trt_opt_shape,
            trt_calib_mode=trt_calib_mode,
            cpu_threads=cpu_threads,
            enable_mkldnn=enable_mkldnn,
            enable_mkldnn_bfloat16=enable_mkldnn_bfloat16,
            delete_shuffle_pass=delete_shuffle_pass,
        )
        # Resource-usage counters (CPU memory, GPU memory, GPU utilization).
        self.cpu_mem, self.gpu_mem, self.gpu_util = 0, 0, 0
        self.batch_size = batch_size
        self.threshold = threshold
        self.run_mode = run_mode

    def set_config(self, model_dir):
        """Parse infer_cfg.yml under *model_dir* into a PredictConfig."""
        return PredictConfig(model_dir)

    def build_transformer(self):
        """Build the image-preprocessing pipeline from infer_cfg.yml.

        Returns:
            list: instantiated preprocess operators, in config order.
        """
        preprocess_ops = []
        for op_info in self.pred_config.preprocess_infos:
            new_op_info = op_info.copy()
            op_type = new_op_info.pop("type")
            # Resolve the operator class by name from this module's imports
            # (Resize, NormalizeImage, Permute, ...). A globals() lookup is
            # safer than the previous eval(): only names actually defined in
            # this module can be instantiated, never arbitrary expressions.
            op_cls = globals()[op_type]
            preprocess_ops.append(op_cls(**new_op_info))
        return preprocess_ops

    # Backward-compatible alias for the historical misspelling.
    buid_transformer = build_transformer

    def preprocess(self, image_list):
        """Transform images and feed them into the predictor's input tensors.

        Args:
            image_list (list): batch of input images.
        Returns:
            dict: model inputs, e.g. {"image": ..., "im_shape": ...,
                "scale_factor": ...}.
        """
        input_im_lst = []  # transformed image data
        input_im_info_list = []  # per-image metadata such as shape and scale
        for im in image_list:
            im, im_info = preprocess(im, self.transformer)
            input_im_lst.append(im)
            input_im_info_list.append(im_info)

        # Repack the data into the model's standard input format.
        input_names = self.predictor.get_input_names()
        if self.run_mode == "lite":
            # Paddle-Lite exposes tensors by name rather than by handle.
            inputs = create_inputs_lite(
                input_im_lst, input_im_info_list, self.batch_size
            )
            for name in input_names:
                input_tensor = self.predictor.get_input_by_name(name)
                # Some (e.g. clas) models name the image input "x" instead of
                # "image"; every other input shares its name with an inputs key.
                if name in ("x", "image"):
                    input_tensor.from_numpy(inputs["image"])
                else:
                    input_tensor.from_numpy(inputs[name])
        else:
            # inputs = {"image": [], "im_shape": [], "scale_factor": []}
            inputs = create_inputs(input_im_lst, input_im_info_list)
            for name in input_names:
                input_tensor = self.predictor.get_input_handle(name)
                if name == "x":  # "x" is the image-data input on some models
                    input_tensor.copy_from_cpu(inputs["image"])
                else:
                    input_tensor.copy_from_cpu(inputs[name])
        return inputs

    def postprocess(self, inputs, result):
        """Normalise raw predictor output into the standard result dict.

        Args:
            inputs (dict): model inputs (unused; kept for interface parity).
            result (dict): raw output holding "boxes", "boxes_num" and
                optionally "masks".
        Returns:
            dict: result with None-valued entries dropped; "boxes" becomes an
            empty [0, 6] array when nothing was detected.
        """
        np_boxes_num = result["boxes_num"]
        if sum(np_boxes_num) <= 0:
            print("[WARNING] No object detected.")
            result = {"boxes": np.zeros([0, 6]), "boxes_num": np_boxes_num}
        result = {k: v for k, v in result.items() if v is not None}
        return result

    def predict(self, repeats=1):
        """Run inference; inputs must already be fed by preprocess().

        Args:
            repeats (int): how many times to re-run prediction (benchmarking
                aid); only the last run's outputs are kept.
        Returns:
            result (dict): 'boxes': np.ndarray of shape [N, 6] with rows
                [class, score, x_min, y_min, x_max, y_max]; 'boxes_num';
                and for mask models (e.g. MaskRCNN) 'masks': np.ndarray of
                shape [N, im_h, im_w].
        """
        # Initialise all three so repeats=0 cannot leave a name unbound.
        np_boxes, np_masks, np_boxes_num = None, None, None
        for _ in range(repeats):
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            if self.run_mode == "lite":
                # Paddle-Lite fetches output tensors by name, not by handle.
                boxes_tensor = self.predictor.get_output_by_name(output_names[0])
                np_boxes = boxes_tensor.numpy()
                boxes_num = self.predictor.get_output_by_name(output_names[1])
                np_boxes_num = boxes_num.numpy()
                if self.pred_config.mask:
                    masks_tensor = self.predictor.get_output_by_name(output_names[2])
                    np_masks = masks_tensor.copy_to_cpu()
            else:
                boxes_tensor = self.predictor.get_output_handle(output_names[0])
                np_boxes = boxes_tensor.copy_to_cpu()
                boxes_num = self.predictor.get_output_handle(output_names[1])
                np_boxes_num = boxes_num.copy_to_cpu()
                if self.pred_config.mask:  # model also predicts instance masks
                    masks_tensor = self.predictor.get_output_handle(output_names[2])
                    np_masks = masks_tensor.copy_to_cpu()
        return dict(boxes=np_boxes, masks=np_masks, boxes_num=np_boxes_num)

    def merge_batch_result(self, batch_result):
        """Merge per-batch result dicts into one dict.

        Array-valued keys are concatenated; "masks"/"segm" are kept as lists
        because their per-image shapes may differ.
        """
        if len(batch_result) == 1:
            return batch_result[0]
        res_key = batch_result[0].keys()
        results = {k: [] for k in res_key}
        for res in batch_result:
            for k, v in res.items():
                results[k].append(v)
        for k, v in results.items():
            if k not in ["masks", "segm"]:
                results[k] = np.concatenate(v)
        return results

    def __call__(self, data):
        """Detect objects in the images stored under data[self.keys["in"]].

        Args:
            data (dict): data[self.keys["in"]] holds an indexable batch of
                images (e.g. an [N, H, W, C] array).
        Returns:
            dict: the same dict, with data[self.keys["out"]] set to the
            threshold-filtered detection result.
        """
        images = data[self.keys["in"]]
        total_result = {"boxes": [], "boxes_num": []}

        # Walk the input in chunks of self.batch_size.
        start_id = 0
        while start_id < len(images):
            batch_image = images[start_id : start_id + self.batch_size]
            start_id += self.batch_size
            # preprocess
            inputs = self.preprocess(batch_image)
            # model prediction
            result = self.predict()
            # postprocess
            result = self.postprocess(inputs, result)
            # Accumulate per-batch results. setdefault also collects optional
            # keys such as "masks" (previously a KeyError for mask models).
            for k, v in result.items():
                total_result.setdefault(k, []).append(v)

        for k in total_result:
            total_result[k] = np.concatenate(total_result[k])
        total_result = filter_box(
            total_result,
            self.threshold,
        )
        data[self.keys["out"]] = total_result
        return data

    def release(self):
        """Free intermediate tensors and let the predictor shrink its memory."""
        self.predictor.clear_intermediate_tensor()
        self.predictor.try_shrink_memory()


def filter_box(result, threshold):
    """Filter detection boxes by confidence score, image by image.

    Args:
        result (dict): "boxes" is a flat [N, 6] batch array whose rows are
            [class, score, x_min, y_min, x_max, y_max]; "boxes_num" gives the
            number of those rows belonging to each image, in batch order.
        threshold (float): boxes with score <= threshold are dropped.

    Returns:
        dict: {"boxes": per-image lists of surviving box rows,
               "boxes_num": per-image counts of surviving boxes}.
            (The original implementation dropped "boxes_num" entirely;
            it is now carried through with the post-filter counts.)
    """
    np_boxes_num = result["boxes_num"]
    boxes = result["boxes"]
    start_idx = 0
    filter_boxes = []
    filter_num = []
    for boxes_num in np_boxes_num:
        # Slice this image's rows out of the flat batch array.
        boxes_i = boxes[start_idx : start_idx + boxes_num, :]
        keep = boxes_i[:, 1] > threshold  # column 1 holds the score
        kept = boxes_i[keep, :]
        filter_boxes.append(kept.tolist())
        filter_num.append(int(kept.shape[0]))
        start_idx += boxes_num
    filter_res = {
        "boxes": filter_boxes,
        "boxes_num": filter_num,
    }
    return filter_res


# Parser for the exported model's infer_cfg.yml
class PredictConfig:
    """Preprocess / postprocess configuration parsed from infer_cfg.yml.

    Args:
        model_dir (str): directory that contains infer_cfg.yml.
    """

    def __init__(self, model_dir):
        # Load and parse the YAML deploy config.
        deploy_file = os.path.join(model_dir, "infer_cfg.yml")
        with open(deploy_file) as f:
            yml_conf = yaml.safe_load(f)
        # Reject architectures this wrapper does not support.
        self.check_model(yml_conf)
        # Architecture family, e.g. YOLO / SSD / RCNN.
        self.arch = yml_conf["arch"]
        # Ordered preprocess op descriptions (resize, normalize, BGR->RGB, ...).
        self.preprocess_infos = yml_conf["Preprocess"]
        # TensorRT minimum subgraph size.
        self.min_subgraph_size = yml_conf["min_subgraph_size"]
        # Class-name list.
        self.labels = yml_conf["label_list"]
        # Whether the model also predicts instance masks.
        self.mask = yml_conf.get("mask", False)
        # Whether the model uses dynamic input shapes.
        self.use_dynamic_shape = yml_conf["use_dynamic_shape"]
        self.tracker = yml_conf.get("tracker", None)
        # NMS parameters differ between detector families; like fpn_stride,
        # the attribute only exists when the config provides the key.
        if "NMS" in yml_conf:
            self.nms = yml_conf["NMS"]
        if "fpn_stride" in yml_conf:
            self.fpn_stride = yml_conf["fpn_stride"]
        if self.arch == "RCNN" and yml_conf.get("export_onnx", False):
            print(
                "The RCNN export model is used for ONNX and it only supports batch_size = 1"
            )
        self.print_config()

    def check_model(self, yml_conf):
        """Validate the configured architecture.

        Raises:
            ValueError: loaded model not in supported model type
        """
        arch = yml_conf["arch"]
        if any(support_model in arch for support_model in SUPPORT_MODELS):
            return True
        raise ValueError(
            "Unsupported arch: {}, expect {}".format(arch, SUPPORT_MODELS)
        )

    def print_config(self):
        """Print a short human-readable summary of the parsed config."""
        print("-----------  Model Configuration -----------")
        print("%s: %s" % ("Model Arch", self.arch))
        print("%s: " % ("Transform Order"))
        for op_info in self.preprocess_infos:
            print("--%s: %s" % ("transform op", op_info["type"]))
        print("--------------------------------------------")


# Smoke test: run the detector over every image under data/input/.
if __name__ == "__main__":
    from glob import glob

    import cv2

    model_dir = "projects/Nan2Shui3/weights/ppyoloe_crn_coco_Nan2Shui3"
    detector = Detector(
        model_dir,
        keys={
            "in": "input_data",
            "out": "det",
        },
        device="cuda:0",
        run_mode="paddle",
    )
    for image_path in glob("data/input/*"):
        # [None, ...] prepends a batch axis so a single image becomes a
        # one-element batch.
        batch = cv2.imread(image_path)[None, ...]
        result_det = detector({"input_data": batch})
    print("Done")
