import os, sys

# Prepend the repository root (7 levels up from this file) to sys.path so
# that the absolute `core.*` imports below resolve when this file is run
# directly rather than as part of the installed package.
parent_path = os.path.abspath(os.path.join(__file__, *([".."] * 7)))
sys.path.insert(0, parent_path)

from core.algorithm.deeplearning.paddle.detectors.det.detector import Detector
from core.algorithm.deeplearning.paddle.detectors.processors import (
    preprocess,
    decode_image,
    NormalizeImage,
    Permute,
)
from core.algorithm.deeplearning.paddle.detectors.keypoint.keypoint_preprocess import (
    EvalAffine,
    TopDownEvalAffine,
    Padding,
)
from core.algorithm.deeplearning.paddle.detectors.keypoint.keypoint_utils import (
    create_inputs,
    create_inputs_lite,
)
from core.algorithm.deeplearning.paddle.detectors.keypoint.keypoint_postprocess import (
    HrHRNetPostProcess,
    HRNetPostProcess,
    translate_to_ori_images,
)
import yaml, math
import numpy as np
from collections import deque

# Supported keypoint architectures, mapped to their post-processing family:
# bottom-up models decode all persons from one heatmap pass, top-down models
# decode one cropped target per heatmap.
KEYPOINT_SUPPORT_MODELS = {
    "HigherHRNet": "keypoint_bottomup",
    "HRNet": "keypoint_topdown",
}


class KeyPointDetector(Detector):
    """Keypoint detector built on the Paddle ``Detector`` base class.

    Supports the architectures listed in ``KEYPOINT_SUPPORT_MODELS``:
    bottom-up (HigherHRNet) and top-down (HRNet) models.

    Args:
        model_dir (str): directory with the exported model and infer_cfg.yml.
        keys (dict): ``{"in": ..., "out": ...}`` names of the entries this
            detector reads from / writes to the shared pipeline ``data`` dict.
        device (str): inference device, forwarded to the base class.
        run_mode (str): backend mode ("paddle", "lite", TensorRT modes, ...).
        batch_size (int): number of images per inference batch.
        trt_min_shape / trt_max_shape / trt_opt_shape (int): TensorRT dynamic
            shape bounds, forwarded to the base class.
        trt_calib_mode (bool): enable TensorRT INT8 calibration.
        cpu_threads (int): CPU math-library thread count.
        enable_mkldnn (bool): enable MKL-DNN acceleration.
        enable_mkldnn_bfloat16 (bool): enable MKL-DNN bfloat16.
        threshold (float): score threshold, forwarded to the base class.
        use_dark (bool): apply DARK refinement in HRNet post-processing.
    """

    def __init__(
        self,
        model_dir,
        keys,
        device="CPU",
        run_mode="paddle",
        batch_size=1,
        trt_min_shape=1,
        trt_max_shape=1280,
        trt_opt_shape=640,
        trt_calib_mode=False,
        cpu_threads=1,
        enable_mkldnn=False,
        enable_mkldnn_bfloat16=False,
        threshold=0.5,
        use_dark=True,
    ):
        super().__init__(
            model_dir,
            keys,
            device,
            run_mode,
            batch_size,
            trt_min_shape,
            trt_max_shape,
            trt_opt_shape,
            trt_calib_mode,
            cpu_threads,
            enable_mkldnn,
            enable_mkldnn_bfloat16,
            threshold,
        )
        self.use_dark = use_dark

    def set_config(self, model_dir):
        """Build the keypoint-specific config parsed from infer_cfg.yml."""
        return PredictConfig_KeyPoint(model_dir)

    def buid_transformer(self):
        """Build the image pre-processing pipeline declared in infer_cfg.yml.

        NOTE(review): the name keeps the original misspelling ("buid")
        because the base class is presumably calling it by this exact name —
        confirm before renaming.

        Returns:
            list: instantiated pre-processing operator objects, in config order.
        """
        preprocess_ops = []
        for op_info in self.pred_config.preprocess_infos:
            new_op_info = op_info.copy()
            op_type = new_op_info.pop("type")
            # HACK: eval() resolves the operator class by the name stored in
            # the config file.  This executes arbitrary expressions from
            # infer_cfg.yml — only use with trusted model configs.
            preprocess_ops.append(eval(op_type)(**new_op_info))
        return preprocess_ops

    def preprocess(self, image_list):
        """Transform the images and copy the batch into the model input nodes.

        Args:
            image_list (list): raw input images for one batch.

        Returns:
            dict: model inputs, e.g.
                ``{"image": ..., "im_shape": ..., "scale_factor": ...}``.
        """
        input_im_lst = []  # transformed image tensors
        input_im_info_list = []  # per-image metadata (shape, scale, ...)
        for im in image_list:
            # Run the transformer pipeline built by buid_transformer().
            im, im_info = preprocess(im, self.transformer)
            input_im_lst.append(im)
            input_im_info_list.append(im_info)
        # Re-pack the data into the model's standard input format and feed it.
        input_names = self.predictor.get_input_names()
        if self.run_mode == "lite":
            inputs = create_inputs_lite(
                input_im_lst, input_im_info_list, self.batch_size
            )
            for name in input_names:
                # Fetch the input tensor handle by node name.
                input_tensor = self.predictor.get_input_by_name(name)
                input_tensor.from_numpy(inputs[name])
        else:
            # inputs = {"image": [], "im_shape": [], "scale_factor": []}
            inputs = create_inputs(input_im_lst, input_im_info_list)
            for name in input_names:
                input_tensor = self.predictor.get_input_handle(name)
                if name == "x":  # "x" is the image-data input node
                    input_tensor.copy_from_cpu(inputs["image"])
                else:  # remaining nodes share their names with keys of inputs
                    input_tensor.copy_from_cpu(inputs[name])
        return inputs

    def postprocess(self, inputs: dict, result):
        """Convert raw heatmaps into keypoint coordinates and scores.

        Args:
            inputs (dict): model inputs from ``preprocess`` (reads "im_shape"
                and, when present, "wh_pad").
            result (dict): raw predictor outputs
                ``{"heatmap": ..., "masks": ...}``.

        Returns:
            dict: ``{"keypoint": ..., "score": ...}``.

        Raises:
            ValueError: if the model architecture is not supported.
        """
        batch = len(inputs["im_shape"])
        np_heatmap = result["heatmap"][:batch]
        # Masks are only produced by some (bottom-up) models.
        np_masks = result["masks"][:batch] if result["masks"] is not None else None
        arch_cls = KEYPOINT_SUPPORT_MODELS[self.pred_config.arch]
        if arch_cls == "keypoint_bottomup":
            results = {}
            h, w = inputs["im_shape"][0]
            preds = [np_heatmap]
            if np_masks is not None:
                preds += np_masks
            preds += [h, w]
            keypoint_postprocess = HrHRNetPostProcess()
            kpts, scores = keypoint_postprocess(*preds)
            results["keypoint"] = kpts
            results["score"] = scores
            return results
        elif arch_cls == "keypoint_topdown":
            results = {}
            # hw -> wh so values line up with [x, y]-ordered keypoints.
            imshape = inputs["im_shape"][:, ::-1]
            # Padding during preprocessing records the added width/height as
            # "wh_pad"; when no padding op ran, default to zero padding.
            wh_pad = inputs.get("wh_pad", np.array([0, 0], np.float32))
            center = np.round(imshape / 2.0)
            scale = imshape / 200.0
            keypoint_postprocess = HRNetPostProcess(use_dark=self.use_dark)
            # Decode the heatmaps into keypoint coordinates.
            kpts, scores = keypoint_postprocess(np_heatmap, center, wh_pad, scale)
            results["keypoint"] = kpts
            results["score"] = scores
            return results
        else:
            raise ValueError(
                "Unsupported arch: {}, expect {}".format(
                    self.pred_config.arch, KEYPOINT_SUPPORT_MODELS
                )
            )

    def predict(self, repeats=1):
        """Run the forward pass and fetch the raw outputs.

        Args:
            repeats (int): number of inference runs (useful for benchmarking);
                only the last run's outputs are kept.

        Returns:
            dict: ``{"heatmap": ndarray, "masks": None}`` raw model outputs.
        """
        np_heatmap, np_masks = None, None
        for _ in range(repeats):
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            if self.run_mode == "lite":  # paddle-lite backend
                heatmap_tensor = self.predictor.get_output_by_name(output_names[0])
                np_heatmap = heatmap_tensor.numpy()
            else:  # paddle / TensorRT backend
                heatmap_tensor = self.predictor.get_output_handle(output_names[0])
                np_heatmap = heatmap_tensor.copy_to_cpu()
            if self.pred_config.tagmap:
                # TODO: bottom-up models also emit a tagmap/masks output;
                # fetching it is not implemented, so masks stays None.
                pass
        result = dict(heatmap=np_heatmap, masks=np_masks)
        return result

    def __call__(self, data):
        """Run keypoint detection on the cropped targets in ``data``.

        Reads ``data[self.keys["in"]]`` — a list of per-frame dicts with keys
        "imgs" (cropped target images) and "boxes" (their detection boxes) —
        and writes one ``{"keypoint": ..., "score": ...}`` dict per frame
        (empty dict when a frame has no targets) to ``data[self.keys["out"]]``.
        """
        crop_targets_data = data[self.keys["in"]]
        results = []
        for data_ in crop_targets_data:
            single_img_res = []
            image_list = data_["imgs"]
            # Process the crops in chunks of self.batch_size.
            batch_loop_cnt = math.ceil(float(len(image_list)) / self.batch_size)
            for i in range(batch_loop_cnt):
                start_index = i * self.batch_size
                end_index = min((i + 1) * self.batch_size, len(image_list))
                batch_image_list = image_list[start_index:end_index]
                # preprocess -> model prediction -> postprocess
                inputs = self.preprocess(batch_image_list)
                result = self.predict()
                result = self.postprocess(inputs, result)
                single_img_res.append(result)
            if single_img_res:
                single_img_res = self.merge_batch_result(single_img_res)
                # Map keypoints from crop coordinates back onto the original
                # image using the detection boxes.
                keypoint_vector, score_vector = translate_to_ori_images(
                    single_img_res, np.array(data_["boxes"])
                )
                single_img_res["keypoint"] = keypoint_vector
                single_img_res["score"] = score_vector
            else:
                single_img_res = {}
            results.append(single_img_res)
        data[self.keys["out"]] = results
        return data


class PredictConfig_KeyPoint:
    """Parse the deploy config (infer_cfg.yml) of an exported keypoint model.

    Exposes the pre-processing pipeline, architecture, labels and runtime
    settings the detector needs.

    Args:
        model_dir (str): root path containing infer_cfg.yml.

    Raises:
        ValueError: if the configured arch is not a supported keypoint model.
    """

    def __init__(self, model_dir):
        # Parse the YAML deploy config exported next to the model.
        deploy_file = os.path.join(model_dir, "infer_cfg.yml")
        # Explicit encoding: label lists may contain non-ASCII text and the
        # platform default encoding is not reliable.
        with open(deploy_file, encoding="utf-8") as f:
            yml_conf = yaml.safe_load(f)
        self.check_model(yml_conf)
        self.arch = yml_conf["arch"]
        self.archcls = KEYPOINT_SUPPORT_MODELS[yml_conf["arch"]]
        self.preprocess_infos = yml_conf["Preprocess"]
        self.min_subgraph_size = yml_conf["min_subgraph_size"]
        self.labels = yml_conf["label_list"]
        # Bottom-up models additionally produce a tagmap output.
        self.tagmap = self.archcls == "keypoint_bottomup"
        self.use_dynamic_shape = yml_conf["use_dynamic_shape"]
        self.print_config()

    def check_model(self, yml_conf):
        """Return True when the configured arch matches a supported model.

        Raises:
            ValueError: loaded model not in supported model type
        """
        if any(name in yml_conf["arch"] for name in KEYPOINT_SUPPORT_MODELS):
            return True
        raise ValueError(
            "Unsupported arch: {}, expect {}".format(
                yml_conf["arch"], KEYPOINT_SUPPORT_MODELS
            )
        )

    def print_config(self):
        """Print a short human-readable summary of the parsed configuration."""
        print("-----------  Model Configuration -----------")
        print("%s: %s" % ("Model Arch", self.arch))
        print("%s: " % ("Transform Order"))
        for op_info in self.preprocess_infos:
            print("--%s: %s" % ("transform op", op_info["type"]))
        print("--------------------------------------------")


if __name__ == "__main__":
    from glob import glob
    import cv2

    model_dir = "projects/traffic/weights/car_lite_hrnet_30_256x256"
    file_list = list(glob("data/input/traffic/keypoints/*.jpg"))

    detector = KeyPointDetector(
        model_dir,
        keys={
            "in": "det",
            "out": "keypoint",
        },
        device="cuda:0",
    )

    for file_path in file_list:
        img = cv2.imread(file_path)
        h, w = img.shape[:2]
        # BUGFIX: __call__ reads data_["imgs"] and data_["boxes"]; the demo
        # previously used the key "target_imgs" (-> KeyError) and supplied no
        # boxes at all.  A full-image box with zero offset keeps the crop ->
        # original-image translation a no-op.
        # TODO(review): confirm the exact box layout translate_to_ori_images
        # expects; only the x/y offsets appear to matter here.
        data = {
            "det": [
                {
                    "imgs": img[None, ...],
                    "boxes": [[0, 0, w, h]],
                },
            ]
        }
        result_det = detector(data)

    print("Done")
