import sys

# Prepend the custom OpenCV build path (takes effect for this script only)
sys.path.insert(0, "/usr/local/lib/python3.8/site-packages/cv2/python-3.8")
import cv2


import os
import numpy as np
import torch
import logging

# Compatibility shim for legacy code that still references np.bool
if not hasattr(np, "bool"):
    np.bool = bool  # or np.bool_, depending on whether the NumPy scalar type is needed

import tensorrt as trt
import pycuda.driver as cuda
from torchvision import transforms
from typing import Sequence, Union
from copy import deepcopy
from collections import defaultdict
from mmengine.structures import PixelData

from .preprocess import (
    LoadImageFromNDArray,
    Resize,
    PackSegInputs,
    SegDataPreProcessor,
    resize,
    SegDataSample,
)
from .post_pipeline import mapillay_postprocess
from .metainfo_custom import metainfo
from .vis_utils import show_result_pyplot

# Inputs accepted by image-consuming APIs: path(s) or ndarray frame(s).
ImageType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]]

TRT_LOGGER = trt.Logger(trt.Logger.INFO)

# NOTE(review): module-level logging.info is a no-op unless the caller has
# already configured the root logger.
logging.info(cv2.__file__)
logging.info(cv2.__version__)

class TensorRTInfer:
    """TensorRT-backed semantic segmentation ("image_seg_and_point").

    Responsibilities:
      * preprocess an input frame with a torchvision pipeline,
      * run a serialized TensorRT engine (dynamic shapes supported; device
        buffers are cached per input shape),
      * post-process the logits into a segmentation map / target points.

    Call order: ``__init__`` -> ``initialize`` -> ``infer`` (repeatedly)
    -> ``clean``.
    """

    def __init__(self, engine_path, infer_cfg):
        """Store configuration and build the preprocessing pipeline.

        Args:
            engine_path: Path to the serialized TensorRT engine file.
            infer_cfg: Dict-like inference config. Keys used here:
                ``use_postprocess``, ``confidence_threshold``,
                ``test_pipeline.{keep_ratio,scale}``,
                ``data_preprocessor.size``.
        """
        # CUDA itself is initialized lazily in initialize(), not here.
        self.cfg = infer_cfg
        self.use_postprocess = infer_cfg.get("use_postprocess", True)
        self.engine_path = engine_path

        self.confidence_threshold = infer_cfg.get("confidence_threshold", None)
        # Result of the previous frame, fed back into postprocessing for
        # temporal consistency.
        self.previous_res = None
        self.test_pipeline = transforms.Compose(
            [
                LoadImageFromNDArray(),
                Resize(
                    keep_ratio=infer_cfg["test_pipeline"]["keep_ratio"],
                    scale=(
                        infer_cfg["test_pipeline"]["scale"][0],
                        infer_cfg["test_pipeline"]["scale"][1],
                    ),
                ),
                PackSegInputs(),
            ]
        )
        # ImageNet mean/std; incoming frames are BGR and converted to RGB.
        data_preprocessor = dict(
            bgr_to_rgb=True,
            mean=[123.675, 116.28, 103.53],
            pad_val=0,
            seg_pad_val=255,
            size=(
                infer_cfg["data_preprocessor"]["size"][0],
                infer_cfg["data_preprocessor"]["size"][1],
            ),
            std=[58.395, 57.12, 57.375],
        )
        self.data_preprocessor = SegDataPreProcessor(**data_preprocessor)
        # input-shape tuple -> (bindings, host/dev input & output buffers)
        self.buffer_cache = {}

        # Original deeplabv3plus classes:
        #   0: roadway, 1: sidewalk, 2: natural area, 3: no-go zone,
        #   4: traffic light, 5: crosswalk, 6: road marking, 7: curb,
        #   8: obstacle, 9: background
        #
        # Remapped classes:
        #   0: background (9)
        #   1: flat       (roadway 0, sidewalk 1, crosswalk 5, marking 6)
        #   2: rough      (natural area 2)
        #   3: bumpy      (curb 7)
        #   4: forbidden  (no-go zone 3)
        #   5: obstacle   (traffic light 4, obstacle 8)
        self.label_map = {0: 1, 1: 1, 2: 2, 3: 4, 4: 5, 5: 1, 6: 1, 7: 3, 8: 5, 9: 0}

    def initialize(self):
        """Create the CUDA context, deserialize the engine and prepare the
        execution context and stream.

        Must be called once before ``infer``.

        Raises:
            RuntimeError: if the engine exposes no input binding.
        """
        cuda.init()
        self.device = cuda.Device(0)  # use GPU 0
        self.ctx = self.device.make_context()

        assert os.path.exists(self.engine_path)
        with open(self.engine_path, "rb") as f, trt.Runtime(
            trt.Logger(trt.Logger.INFO)
        ) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())

        # Collect input / output binding indices.
        # NOTE(review): this is the TensorRT 8.x binding-index API; it was
        # removed in TensorRT 10 (replaced by named-tensor I/O).
        self.input_binding_indices = []
        self.output_binding_indices = []

        for idx in range(self.engine.num_bindings):
            if self.engine.binding_is_input(idx):
                self.input_binding_indices.append(idx)
            else:
                self.output_binding_indices.append(idx)

        # Treat the first input binding as the primary image input.
        if self.input_binding_indices:
            self.input_binding_idx = self.input_binding_indices[0]
        else:
            raise RuntimeError("模型没有输入绑定")

        # Log binding information to aid debugging.
        logging.info("model binding information:")
        for idx in range(self.engine.num_bindings):
            name = self.engine.get_binding_name(idx)
            is_input = self.engine.binding_is_input(idx)
            shape = self.engine.get_binding_shape(idx)
            logging.info(
                f"    Binding {idx}: {name}, input: {is_input}, shape: {shape}"
            )

        # Create the execution context up front so dynamic shapes can be
        # queried/set later in get_or_create_buffers().
        self.context = self.engine.create_execution_context()
        self.stream = cuda.Stream()

    def get_or_create_buffers(self, batch_shape):
        """Return cached host/device buffers for ``batch_shape``, allocating
        them on first use.

        Args:
            batch_shape: shape of the preprocessed input batch (N, C, H, W).

        Returns:
            Tuple ``(bindings, host_inputs, dev_inputs, host_outputs,
            dev_outputs)``.
        """
        # The input shape is the cache key.
        cache_key = tuple(batch_shape)

        if cache_key in self.buffer_cache:
            return self.buffer_cache[cache_key]

        # Cache miss: allocate a fresh buffer set.
        bindings = [None] * self.engine.num_bindings
        host_inputs = []
        dev_inputs = []
        host_outputs = []
        dev_outputs = []

        # Resolve dynamic dimensions for every input binding.
        for idx in self.input_binding_indices:
            if idx == self.input_binding_idx:
                self.context.set_binding_shape(idx, batch_shape)
            else:
                # Secondary inputs: copy batch dim and spatial dims from the
                # primary input for any -1 (dynamic) dimension.
                other_shape = list(self.engine.get_binding_shape(idx))
                for i in range(len(other_shape)):
                    if other_shape[i] == -1:
                        if i == 0:
                            other_shape[i] = batch_shape[0]
                        elif i == 2 or i == 3:
                            other_shape[i] = batch_shape[i]
                self.context.set_binding_shape(idx, tuple(other_shape))

        # Allocate pinned host memory and matching device memory.
        for idx in range(self.engine.num_bindings):
            dims = self.context.get_binding_shape(idx)
            shape = tuple(dims)
            dtype = trt.nptype(self.engine.get_binding_dtype(idx))

            host_mem = cuda.pagelocked_empty(shape, dtype=dtype)
            dev_mem = cuda.mem_alloc(host_mem.nbytes)

            bindings[idx] = int(dev_mem)
            if self.engine.binding_is_input(idx):
                host_inputs.append(host_mem)
                dev_inputs.append(dev_mem)
            else:
                host_outputs.append(host_mem)
                dev_outputs.append(dev_mem)

        buffers = (bindings, host_inputs, dev_inputs, host_outputs, dev_outputs)
        self.buffer_cache[cache_key] = buffers
        return buffers

    def prepare_data(self, img):
        """Run the test pipeline + data preprocessor on one frame.

        Args:
            img: input image as an ndarray (BGR).

        Returns:
            Tuple ``(inputs, data_samples)`` where ``inputs`` is a float32
            numpy batch and ``data_samples`` carries the image metainfo.
        """
        data = defaultdict(list)

        data_ = self.test_pipeline(dict(img=img))
        data["inputs"].append(data_["inputs"])
        data["data_samples"].append(data_["data_samples"])

        # training=False: inference-time normalization/padding only.
        data = self.data_preprocessor(data, False)

        return data["inputs"].numpy().astype(np.float32), data["data_samples"]

    def infer(self, frame: np.ndarray):
        """Run preprocessing, TensorRT inference and postprocessing on one
        frame.

        Args:
            frame: input image (BGR ndarray).

        Returns:
            A postprocessed result (``use_postprocess=True``) or a
            SegDataSample carrying the raw prediction.
        """
        # 1) Preprocess.
        inp_array, data_samples = self.prepare_data(frame)
        batch_shape = inp_array.shape

        # 2) Get (or allocate) buffers for this input shape.
        bindings, host_inputs, dev_inputs, host_outputs, dev_outputs = (
            self.get_or_create_buffers(batch_shape)
        )

        # 3) Copy input to the device and launch inference.
        np.copyto(host_inputs[0], inp_array)
        cuda.memcpy_htod_async(dev_inputs[0], host_inputs[0], self.stream)

        self.context.execute_async_v2(
            bindings=bindings, stream_handle=self.stream.handle
        )

        # Wrap the TensorRT output in a PyTorch CUDA tensor.
        # NOTE(review): assumes the engine's first output dtype matches this
        # choice (float logits vs. int argmax) — confirm against the engine.
        if self.use_postprocess:
            dtype = torch.float32
        else:
            dtype = torch.int32
        out_shape = host_outputs[0].shape
        seg_mask = torch.empty(out_shape, dtype=dtype, device="cuda")

        # Raw CUDA pointer of the torch tensor.
        gpu_ptr = seg_mask.data_ptr()

        # Wait for TensorRT to finish before reading its output buffer.
        self.stream.synchronize()
        dev_ptr = int(dev_outputs[0])
        # Device-to-device copy. Size the copy from the tensor's actual
        # element size instead of hard-coding 4 bytes (both float32 and
        # int32 happen to be 4 bytes, but this is robust to dtype changes).
        cuda.memcpy_dtod(gpu_ptr, dev_ptr, seg_mask.numel() * seg_mask.element_size())

        # 4) Postprocess directly on the GPU.
        seg_mask = seg_mask.reshape(*out_shape)  # ensure the expected shape

        if self.use_postprocess:
            post_res = self.postprocess_result(seg_mask, data_samples)
        else:
            post_res = data_samples[0]
            post_res.set_data({"pred_sem_seg": PixelData(**{"data": seg_mask[0]})})

        return post_res

    def remap_labels(self, res):
        """Remap raw model labels to the coarse label set via ``label_map``.

        Args:
            res: a numpy array, a torch tensor, or a SegDataSample.

        Returns:
            A new object of the same kind with labels remapped.

        Raises:
            ValueError: for unsupported input types.
        """
        # Concrete array/tensor types are checked first (cheap, common case);
        # an ndarray or tensor is never a SegDataSample, so order is safe.
        if isinstance(res, np.ndarray):
            new_res = np.zeros_like(res)
            for old_label, new_label in self.label_map.items():
                new_res[res == old_label] = new_label
        elif isinstance(res, torch.Tensor):
            new_res = torch.zeros_like(res)
            for old_label, new_label in self.label_map.items():
                new_res[res == old_label] = new_label
        elif isinstance(res, SegDataSample):
            new_res = deepcopy(res)
            seg_map = res.pred_sem_seg.data
            seg_map_new = torch.zeros_like(seg_map)
            for old_label, new_label in self.label_map.items():
                seg_map_new[seg_map == old_label] = new_label
            new_res.pred_sem_seg.data = seg_map_new
        else:
            raise ValueError

        return new_res

    def postprocess_result(self, seg_logit, data_samples):
        """Undo padding/flip, resize logits to the original image size, take
        the prediction, and run the mapillary postprocess.

        Args:
            seg_logit: logits tensor of shape (1, C, H, W) on the GPU.
            data_samples: list with one SegDataSample carrying metainfo.

        Returns:
            The postprocessed segmentation result.
        """
        _, C, H, W = seg_logit.shape

        img_meta = data_samples[0].metainfo
        # Remove the padded border added during preprocessing.
        if "img_padding_size" not in img_meta:
            padding_size = img_meta.get("padding_size", [0] * 4)
        else:
            padding_size = img_meta["img_padding_size"]
        padding_left, padding_right, padding_top, padding_bottom = padding_size
        # Shape is (1, C, H', W') after cropping the padding away.
        seg_logit = seg_logit[
            :, :, padding_top : H - padding_bottom, padding_left : W - padding_right
        ]

        # Undo test-time flip augmentation, if any.
        flip = img_meta.get("flip", None)
        if flip:
            flip_direction = img_meta.get("flip_direction", None)
            assert flip_direction in ["horizontal", "vertical"]
            if flip_direction == "horizontal":
                seg_logit = seg_logit.flip(dims=(3,))
            else:
                seg_logit = seg_logit.flip(dims=(2,))

        # Resize logits back to the original image shape.
        seg_logit = resize(
            seg_logit,
            size=img_meta["ori_shape"],
            mode="bilinear",
            align_corners=False,
            warning=False,
        ).squeeze(0)

        if C > 1:
            seg_pred = seg_logit.argmax(dim=0, keepdim=True)
        else:
            # Binary head. The original code read self.decode_head.threshold,
            # but `decode_head` is never defined on this class (copied from
            # mmseg's EncoderDecoder) and raised AttributeError. Read the
            # threshold from the config instead; 0.3 is mmseg's decode-head
            # default.
            threshold = self.cfg.get("binary_threshold", 0.3)
            seg_logit = seg_logit.sigmoid()
            seg_pred = (seg_logit > threshold).to(seg_logit)

        data_samples[0].set_data(
            {
                "seg_logits": PixelData(**{"data": seg_logit}),
                "pred_sem_seg": PixelData(**{"data": seg_pred}),
            }
        )

        post_result = mapillay_postprocess(
            data_samples[0],
            self.confidence_threshold,
            postprocess=True,
            previous_segmentation_map=self.previous_res,
        )
        # Keep a copy for temporal smoothing of the next frame.
        self.previous_res = deepcopy(post_result)

        return post_result

    def clean(self):
        """Release TensorRT and CUDA resources. Call once when finished."""
        logging.info("release tensorrt resources start")
        # Free cached device buffers. (The original code freed self.inputs /
        # self.outputs, attributes that are never assigned anywhere — so
        # device memory was silently leaked. Buffers live in buffer_cache.)
        if hasattr(self, "buffer_cache"):
            for _, _, dev_inputs, _, dev_outputs in self.buffer_cache.values():
                for dev_mem in dev_inputs:
                    dev_mem.free()
                for dev_mem in dev_outputs:
                    dev_mem.free()
            self.buffer_cache.clear()
        # Explicitly destroy the execution context and engine.
        if hasattr(self, "context"):
            del self.context
        if hasattr(self, "engine"):
            del self.engine
        # Drain and release the CUDA stream.
        if hasattr(self, "stream"):
            self.stream.synchronize()
            del self.stream
        # Detach the CUDA context created in initialize().
        if hasattr(self, "ctx") and self.ctx:
            self.ctx.detach()
        logging.info("release tensorrt resources end")

    def visualize(
        self,
        frame,
        result,
    ):
        """Overlay the segmentation result on ``frame``.

        Args:
            frame: original image.
            result: segmentation result to draw.

        Returns:
            The rendered visualization image.
        """
        vis_meta = metainfo[self.cfg.get("custom_palette", "mapillay")]
        draw_img_post = show_result_pyplot(
            frame,
            result=result,
            classes=vis_meta["classes"],
            palette=vis_meta["palette"],
            opacity=0.6,
            with_labels=False,
            draw_gt=False,
            show=False,
            conf=self.confidence_threshold,
        )
        return draw_img_post