import numpy as np
from pathlib import Path
import os, yaml, math, cv2, sys

parent_path = os.path.abspath(os.path.join(__file__, *([".."] * 7)))
sys.path.insert(0, parent_path)

from core.algorithm.deeplearning.paddle.detectors.seg.seg_preprocess import (
    Normalize,
    preprocess,
)
from core.algorithm.deeplearning.paddle.detectors.det.utils import (
    load_predictor,
    create_inputs,
)


# Generic semantic-segmentation model detector
class Detector(object):
    """Generic semantic-segmentation detector backed by Paddle inference.

    Reads the exported model's deploy config, builds the image-preprocessing
    pipeline and the Paddle predictor, and then acts as a callable stage in a
    processing stream: ``data`` dict in, ``data`` dict (with segmentation
    results attached under ``keys["out"]``) out.

    Args:
        model_dir (str): directory holding the exported model and its
            deploy.yaml config file.
        keys (dict): ``{"in": ..., "out": ...}`` — the keys used to read
            input images from / write results into the streamed ``data`` dict.
        device (str): inference device, forwarded to ``load_predictor``.
        run_mode (str): "paddle" or a TensorRT run mode.
        batch_size (int): inference batch size.
        trt_min_shape / trt_max_shape / trt_opt_shape (sequence of int):
            TensorRT dynamic-shape hints.
        trt_calib_mode (bool): TensorRT INT8 calibration switch.
        cpu_threads (int): number of CPU math threads.
        enable_mkldnn (bool): enable MKL-DNN acceleration.
        enable_mkldnn_bfloat16 (bool): enable MKL-DNN bfloat16.
        delete_shuffle_pass (bool): forwarded to ``load_predictor``.
        level (int): 1 = top-level task, 2 = nested per-item task.

    Raises:
        ValueError: if ``level`` is neither 1 nor 2.
    """

    def __init__(
        self,
        model_dir,
        keys,
        device="CPU",
        run_mode="paddle",
        batch_size=1,
        # Tuples instead of lists: mutable default arguments are shared
        # across all calls — a classic Python pitfall.
        trt_min_shape=(100, 100),
        trt_max_shape=(3000, 2000),
        trt_opt_shape=(1024, 512),
        trt_calib_mode=False,
        cpu_threads=1,
        enable_mkldnn=False,
        enable_mkldnn_bfloat16=False,
        delete_shuffle_pass=False,
        level=1,
    ):
        self.level = level
        # Pick the streaming entry point by task level.
        runners = {1: self.run_level_1, 2: self.run_level_2}
        if level not in runners:
            # ValueError subclasses Exception, so existing `except Exception`
            # handlers keep working.
            raise ValueError("wrong level for detector")
        self.running = runners[level]
        self.keys = keys
        # Parse the deploy.yaml config found under model_dir.
        self.pred_config = self.set_config(model_dir)
        # Build the image-preprocessing pipeline from the config.
        self.transformer = self.buid_transformer()
        # Build the Paddle predictor.
        self.predictor, self.config = load_predictor(
            model_dir,
            run_mode=run_mode,
            batch_size=batch_size,
            min_subgraph_size=3,
            device=device,
            use_dynamic_shape=True,
            # Convert to lists in case load_predictor mutates/expects lists.
            trt_min_shape=list(trt_min_shape),
            trt_max_shape=list(trt_max_shape),
            trt_opt_shape=list(trt_opt_shape),
            trt_calib_mode=trt_calib_mode,
            cpu_threads=cpu_threads,
            enable_mkldnn=enable_mkldnn,
            enable_mkldnn_bfloat16=enable_mkldnn_bfloat16,
            delete_shuffle_pass=delete_shuffle_pass,
        )
        # Resource-usage bookkeeping (CPU memory, GPU memory, GPU utilisation).
        self.cpu_mem, self.gpu_mem, self.gpu_util = 0, 0, 0
        self.batch_size = batch_size

    def set_config(self, model_dir):
        """Load and return the deploy.yaml config parser for *model_dir*."""
        return PredictConfig(model_dir)

    def build_transformer(self):
        """Build the preprocessing ops declared in deploy.yaml.

        Each entry's "type" must name a transform class available in this
        module's namespace (e.g. ``Normalize``).

        Raises:
            ValueError: if a config entry names an unknown transform.
        """
        preprocess_ops = []
        for op_info in self.pred_config.preprocess_infos:
            cfg = op_info.copy()
            op_type = cfg.pop("type")
            # Resolve the class by name instead of eval(): eval on strings
            # read from a config file can execute arbitrary code.
            op_cls = globals().get(op_type)
            if op_cls is None:
                raise ValueError("unknown preprocess op: %s" % op_type)
            preprocess_ops.append(op_cls(**cfg))
        return preprocess_ops

    # Backward-compatible alias for the original (typo'd) method name.
    buid_transformer = build_transformer

    def preprocess(self, image_list):
        """Transform images and copy them into the predictor's input tensors.

        Args:
            image_list: iterable of input images (numpy arrays).
        Returns:
            dict: model inputs, e.g. {"image": ..., "im_shape": ...,
            "scale_factor": ...} as produced by ``create_inputs``.
        """
        input_im_lst = []  # transformed image data
        input_im_info_list = []  # per-image metadata (shape, scale, ...)
        for im in image_list:
            im, im_info = preprocess(im, self.transformer)
            input_im_lst.append(im)
            input_im_info_list.append(im_info)
        # Repack into the model's standard input format.
        inputs = create_inputs(input_im_lst, input_im_info_list)
        # Copy each named input onto its tensor; "x" is the image node, every
        # other node name matches a key in ``inputs`` directly.
        for name in self.predictor.get_input_names():
            input_tensor = self.predictor.get_input_handle(name)
            if name == "x":
                input_tensor.copy_from_cpu(inputs["image"])
            else:
                input_tensor.copy_from_cpu(inputs[name])
        return inputs

    def postprocess(self, result, img_info):
        """Crop each predicted mask back to its original image extent.

        Args:
            result (dict): {"masks": array-like of per-image masks}.
            img_info (dict): {"im_shape": per-image (height, width) pairs}.
        Returns:
            dict: ``result`` with "masks" replaced by a list of uint8 arrays,
            one per image, cropped to that image's shape.
        """
        masks = []
        for mask, im_shape in zip(result["masks"], img_info["im_shape"]):
            mask = mask.astype(np.uint8)[: int(im_shape[0]), : int(im_shape[1])]
            masks.append(mask)
        result["masks"] = masks
        return result

    def predict(self, repeats=1):
        """Run inference and fetch the segmentation masks.

        Args:
            repeats (int): number of times to repeat inference (useful for
                benchmarking; only the last run's output is kept).
        Returns:
            dict: {"masks": np.ndarray} of per-pixel class labels with shape
            [N, im_h, im_w] after the argmax over the class axis.
        """
        np_masks = None
        for _ in range(repeats):
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            masks_tensor = self.predictor.get_output_handle(output_names[0])
            np_masks = masks_tensor.copy_to_cpu()
            # A model exported without an argmax head returns [N, C, H, W]
            # class scores; reduce to per-pixel labels manually.
            if len(np_masks.shape) == 4:
                np_masks = np.argmax(np_masks, 1)

        return dict(masks=np_masks)

    def _run_single(self, imgs):
        """Preprocess -> predict -> postprocess one batch of images."""
        inputs = self.preprocess(imgs)
        result = self.predict()
        return self.postprocess(result, inputs)

    def run_level_1(self, data):
        """Top-level task: segment data[keys['in']] directly."""
        data[self.keys["out"]] = self._run_single(data[self.keys["in"]])
        return data

    def run_level_2(self, data):
        """Nested task: each item in data[keys['in']] carries its own
        "imgs" batch; results are collected per item."""
        results = [self._run_single(item["imgs"]) for item in data[self.keys["in"]]]
        data[self.keys["out"]] = results
        return data

    def __call__(self, data):
        """Stream interface: run the level-appropriate pipeline on *data*."""
        return self.running(data)

    def release(self):
        """Release intermediate predictor buffers and shrink memory usage."""
        self.predictor.clear_intermediate_tensor()
        self.predictor.try_shrink_memory()


# Parser for the exported model's deploy.yaml config file
class PredictConfig:
    """Parser for a PaddleSeg deploy.yaml export config.

    Loads the preprocessing pipeline definition (resize, normalization,
    BGR-to-RGB, ...) and the optional class-name list.

    Args:
        model_dir (str): directory containing deploy.yaml.
    """

    def __init__(self, model_dir):
        # Parse the YAML config PaddleSeg writes next to the exported model.
        deploy_file = os.path.join(model_dir, "deploy.yaml")
        # Explicit encoding: label names may contain non-ASCII characters.
        with open(deploy_file, encoding="utf-8") as f:
            yml_conf = yaml.safe_load(f)
        # Preprocess pipeline: resize, normalization, BGR-to-RGB, etc.
        self.preprocess_infos = yml_conf["Deploy"]["transforms"]
        # Optional class-name list; PaddleSeg's default export config does
        # not include it, so it has to be added by hand.
        self.labels = yml_conf.get("label_list")
        self.print_config()

    def print_config(self):
        """Print a human-readable summary of the model configuration."""
        print("-----------  Model Configuration -----------")
        print("%s: %s" % ("Model Arch", "seg"))
        print("%s: " % ("Transform Order"))
        for op_info in self.preprocess_infos:
            print("--%s: %s" % ("transform op", op_info["type"]))
        print("--------------------------------------------")


if __name__ == "__main__":
    from glob import glob
    from core.algorithm.deeplearning.paddle.detectors.seg.seg_utils import visualize

    model_dir = "projects/traffic/weights/deeplabv3p_resnet50_os8_motorway_1024x512"
    file_list = list(glob("data/input/traffic/*.jpg"))
    detector = Detector(
        model_dir,
        keys={
            "in": "input_data",
            "out": "seg",
        },
        device="cuda:0",
        run_mode="paddle",
    )
    for file_path in file_list:
        img = cv2.imread(file_path)
        if img is None:  # unreadable / corrupt file — skip instead of crashing
            print("skip unreadable image: %s" % file_path)
            continue
        img_data = img[None, ...]  # add batch dimension: (1, H, W, C)
        data = {"input_data": img_data}
        result_seg = detector(data)
        # postprocess() returns "masks" as a *list* of per-image arrays, so
        # stack it into one ndarray before astype (a list has no .astype).
        # Assumes all masks in the batch share one shape (batch of 1 here).
        masks = np.stack(result_seg["seg"]["masks"]).astype(np.uint8)
        vis_res = visualize(img_data, results=masks)
        # Name the output after the input file so successive images do not
        # overwrite each other (the old code rewrote temp.jpg every loop).
        out_path = os.path.join("data/output", os.path.basename(file_path))
        cv2.imwrite(out_path, vis_res[0])
    print("Done")
