import os, sys

parent_path = os.path.abspath(os.path.join(__file__, *([".."] * 3)))
sys.path.insert(0, parent_path)

import numpy as np
import yaml, cv2
import threading

from core.utils.logger import logging
from core.algorithm.modules import ProcessorSequence


class PredictPipeline(object):
    """Pipeline predictor: detectors are plugged into the pipeline as plugins.

    Models sharing the same ``exe_seq`` index form one layer and are run in
    parallel threads; layers are executed sequentially in insertion order of
    their index.  All models communicate through a single shared ``data``
    dict that pre/post-processors and predictors mutate in place.
    """

    def __init__(self, cfg) -> None:
        """Load the configuration, then build and initialize the pipeline.

        Args:
            cfg: the configuration dict itself, or a path to a YAML file
                containing it.

        Raises:
            TypeError: if ``cfg`` is neither a dict nor a str path.
        """
        if isinstance(cfg, dict):
            self.cfg = cfg
        elif isinstance(cfg, str):
            with open(cfg, "r") as f:
                self.cfg = yaml.safe_load(f)
        else:
            raise TypeError("cfg must be a dict or a YAML file path")
        # Pristine copy of the config.  The original only kept it for the
        # dict form, leaving self._cfg undefined for the YAML-path form.
        self._cfg = self.cfg.copy()

        # --- 1. model configuration ---
        self.pipeline_config = self.model_configuration(self.cfg)
        # --- 2. model initialization ---
        self.pipeline = self.pipeline_initialization(self.pipeline_config)

    def model_configuration(self, cfg: dict) -> dict:
        """Build the whole pipeline layout from the raw config.

        Each enabled entry follows a preprocessor -> model -> postprocessor
        scheme and is grouped by its ``exe_seq`` execution index.

        Args:
            cfg: raw config mapping ``model_tag -> model settings``.

        Returns:
            pipeline_config: mapping ``exe_seq -> list of plugin configs``,
            one list entry per enabled model in that layer.
        """
        pipeline_config = {}
        # Collect the enabled algorithm modules and build the execution plan.
        for model_tag, model_cfg in cfg.items():
            if not model_cfg["enable"]:
                continue
            seq_idx = model_cfg["exe_seq"]
            pipeline_config.setdefault(seq_idx, []).append(
                {
                    "type": model_cfg["type"],
                    "tag": model_tag,
                    "keys": model_cfg["cfg"]["keys"],
                    "preprocess": model_cfg["preprocess"],
                    "postprocess": model_cfg["postprocess"],
                    "cfg": model_cfg["cfg"],
                }
            )
        return pipeline_config

    def pipeline_initialization(self, pipeline_config: dict) -> list:
        """Instantiate every model of ``pipeline_config`` and load it on GPU.

        Args:
            pipeline_config: output of :meth:`model_configuration`.

        Returns:
            models_seq: list of layers; each layer is a list of dicts with
            keys ``tag``/``predictor``/``preprocess``/``postprocess``.
        """
        models_seq = []
        # Frameworks whose detector classes were already imported.  The
        # original used a single boolean flag, so a pipeline mixing Paddle
        # and MM detectors never imported the second framework and eval()
        # raised NameError; imports are now tracked per framework.
        imported_frameworks = set()
        for _, layer_model_cfg in pipeline_config.items():
            layer_model_seq = []
            for model_config in layer_model_cfg:
                detector_type = model_config["type"]
                # Lazily import only the backend(s) actually required.
                if "Paddle" in detector_type:
                    if "paddle" not in imported_frameworks:
                        from core.algorithm.deeplearning.paddle.detectors import (  # noqa: F401
                            PaddleDET,
                            PaddleKPT,
                            PaddleSEG,
                        )

                        imported_frameworks.add("paddle")
                elif "MM" in detector_type:
                    if "mm" not in imported_frameworks:
                        from core.algorithm.deeplearning.mmdeploy.detectors import (  # noqa: F401
                            MMDET,
                            MMKPT,
                            MMOCRDET,
                            MMOCRDETSub,
                            MMOCRREC,
                        )

                        imported_frameworks.add("mm")
                else:
                    raise Exception("wrong type of algorithm pipeline config")
                pre_cfg = model_config["preprocess"]
                preprocessor_ = None if pre_cfg is None else ProcessorSequence(pre_cfg)
                post_cfg = model_config["postprocess"]
                postprocessor_ = None if post_cfg is None else ProcessorSequence(post_cfg)
                layer_model_seq.append(
                    {
                        "tag": model_config["tag"],
                        # NOTE(review): eval() on a config-supplied class name —
                        # config files must be trusted; an explicit class
                        # registry would be safer.
                        "predictor": eval(detector_type)(**model_config["cfg"]),
                        "preprocess": preprocessor_,
                        "postprocess": postprocessor_,
                    }
                )
            models_seq.append(layer_model_seq)
        return models_seq

    @staticmethod
    def model_process(model, data):
        """Run preprocess -> predict -> postprocess for one model.

        The processors mutate ``data`` in place (the threaded caller relies
        on that); the final dict is also returned for direct callers.
        """
        if model["preprocess"] is not None:
            data = model["preprocess"](data)
        data = model["predictor"](data)
        if model["postprocess"] is not None:
            data = model["postprocess"](data)
        return data

    def __call__(self, data):
        """Detection flow for a single frame of data.

        input:
            data: {
                "input_data": image array, RGB, shape
                    [batch_size, height, width, channel]
                "detector_tags": set of detector tags to run; empty (or
                    absent) means every predictor runs.  Tags match the
                    model tags in config.yml.
            }
        """
        if isinstance(data, np.ndarray):
            data = {"input_data": data}

        detector_tags = data.get("detector_tags", set())

        for layer_models in self.pipeline:
            # Apply the tag filter to every layer.  The original applied it
            # only in the multi-model branch, so single-model layers always
            # ran regardless of detector_tags.
            selected = [
                m
                for m in layer_models
                if not detector_tags or m["tag"] in detector_tags
            ]
            if len(selected) > 1:  # run a multi-model layer in parallel threads
                threads = []
                for single_model in selected:
                    t = threading.Thread(
                        target=self.model_process, args=[single_model, data]
                    )
                    t.start()
                    threads.append(t)
                for t_ in threads:
                    t_.join()
            elif selected:  # single model: run inline, no thread overhead
                self.model_process(selected[0], data)
        return data

    def close(self):
        """Release every predictor in the pipeline."""
        for layer_models in self.pipeline:
            for model in layer_models:
                model["predictor"].release()

    def start(self):
        pass

    def set(self):
        pass
if __name__ == "__main__":

    def pred_img():
        """
        Image-prediction example, used for module testing.
        """
        import glob, time

        cfg_file = "projects/wind_power/configs/algorithm_pipeline_cfg.yml"
        img_file_path = "data/input/wind_power"
        if os.path.isdir(img_file_path):
            img_file_path = glob.glob(img_file_path + "/*")
        else:
            img_file_path = [img_file_path]

        pipeline_predictor = PredictPipeline(cfg_file)
        a = time.time()
        for img_path in img_file_path:
            # Read the image and add a leading batch dimension.
            img_array = cv2.imread(img_path)[None, ...]
            start = time.time()
            # detector_tags is a set per the pipeline contract (empty = run
            # all); the original passed {} — an empty dict — which only
            # worked because both are falsy/len-0.
            data = {
                "input_data": np.concatenate([img_array, img_array]),
                "detector_tags": set(),
            }
            result = pipeline_predictor(data)
            end = time.time()
            logging.info("|use time {}|".format(end - start))
        # Release the predictors once, after all images are processed.  The
        # original called close() inside the loop, freeing the models after
        # the first image while later iterations still used them.
        pipeline_predictor.close()
        b = time.time()
        logging.info("|total time: {}|".format(b - a))

    def pred_video():
        """Video-prediction example: run the pipeline frame by frame and
        write the visualized result to an output video."""
        from core.task.modules.processors import Visualization
        from projects.shipping_lane.processor.custom_processor import CustomProcessor

        custom_processor = CustomProcessor(keys={"boat": "det"})
        visualization = Visualization(
            keys={
                "in": "objects",
                "out": "visualize",
            }
        )
        cfg_file = "projects/shipping_lane/configs/algorithm_pipeline_paddle.yml"
        video_path = "data/input/baixiang/vis_2.mp4"
        save_folder_path = "data/output/baixiang"
        pipeline_predictor = PredictPipeline(cfg_file)
        cap = cv2.VideoCapture(video_path)
        video_write = cv2.VideoWriter(
            save_folder_path + "/vis_" + os.path.basename(video_path),
            cv2.VideoWriter_fourcc(*"XVID"),
            25,
            (1920, 1080),
        )
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            data = {"input_data": frame[None, ...]}
            data = pipeline_predictor(data)
            data = custom_processor(data)
            data = visualization(data)
            video_write.write(data["visualize"])
        video_write.release()

    pred_img()
    logging.info("|INFO:done|")
