import cv2
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit  # must be imported to create/initialize the CUDA context
import numpy as np

# Detection class names (NEU surface-defect categories); index matches the model's class ids.
CLASSES = ['crazing', 'inclusion', 'patches', 'pitted_surface', 'rolled-in_scale', 'scratches']  # class-name list
# Network input resolution as (height, width).
IMG_SIZE = (896, 896)  # input image size, (height, width)

class TRTInference:
    """TensorRT 10.x wrapper for a YOLO-style detector with baked-in ONNX NMS.

    Loads a serialized engine, allocates device buffers once, and exposes
    ``infer(image)`` which runs preprocess -> execute -> postprocess and
    returns a list of detection dicts.
    """

    def __init__(self, engine_path):
        """Load the engine, create an execution context and allocate buffers.

        Args:
            engine_path: path to a serialized TensorRT engine file (.engine).
        """
        self.engine = self.load_engine(engine_path)
        self.context = self.engine.create_execution_context()

        # Allocate GPU/host buffers once; they are reused on every infer() call.
        self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers()

    def load_engine(self, engine_path):
        """Deserialize and return a TensorRT engine from disk."""
        TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
        with open(engine_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    def allocate_buffers(self):
        """Allocate device (and pinned host) buffers for every I/O tensor.

        Uses the TensorRT 10.x named-tensor API (num_io_tensors /
        get_tensor_name) instead of the removed binding-index API.

        Returns:
            (inputs, outputs, bindings, stream) where inputs/outputs are lists
            of dicts with keys 'name', 'dtype', 'host_memory', 'device_memory'.
        """
        inputs = []
        outputs = []
        bindings = []
        stream = cuda.Stream()

        for i in range(self.engine.num_io_tensors):
            name = self.engine.get_tensor_name(i)
            shape = self.engine.get_tensor_shape(name)
            dtype = trt.nptype(self.engine.get_tensor_dtype(name))

            # Element count; static shapes only (e.g. (1, 3, 896, 896)).
            size = trt.volume(shape)
            # Dynamic batch dimension (-1): size from the optimization
            # profile's optimal shape ([0]=min, [1]=opt, [2]=max).
            if shape[0] == -1:
                opt_shape = self.engine.get_tensor_profile_shape(name, 0)[1]
                size = trt.volume(opt_shape)

            # np.dtype(...).itemsize is the canonical element size — the
            # original instantiated a scalar just to read .itemsize.
            dtype_size = np.dtype(dtype).itemsize

            device_mem = cuda.mem_alloc(size * dtype_size)
            bindings.append(int(device_mem))

            if self.engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
                # Input host data comes directly from preprocess(); no
                # page-locked staging buffer is needed on this side.
                inputs.append({'name': name, 'dtype': dtype, 'host_memory': None, 'device_memory': device_mem})
            else:
                # Pinned host memory for fast async device-to-host copies.
                host_mem = cuda.pagelocked_empty(size, dtype)
                outputs.append({'name': name, 'dtype': dtype, 'host_memory': host_mem, 'device_memory': device_mem})

        return inputs, outputs, bindings, stream

    def preprocess(self, image: np.ndarray):
        """Convert a BGR uint8 image to a contiguous (1, 3, H, W) float32 tensor.

        Args:
            image: HxWx3 BGR image (as returned by cv2.imread).

        Returns:
            np.ndarray of shape (1, 3, *IMG_SIZE), values scaled to [0, 1].
        """
        # BUGFIX: cv2.resize expects (width, height) while IMG_SIZE is
        # (height, width) — swap explicitly instead of relying on H == W.
        resized = cv2.resize(image, (IMG_SIZE[1], IMG_SIZE[0]))
        rgb = resized[:, :, ::-1]           # BGR -> RGB
        chw = np.transpose(rgb, (2, 0, 1))  # HWC -> CHW
        normalized = chw.astype(np.float32) / 255.0
        batched = np.expand_dims(normalized, axis=0)  # add batch dim
        # Contiguous layout is required for the raw host-to-device copy.
        return np.ascontiguousarray(batched)

    def postprocess(self, output, conf_threshold=0.25):
        """Decode ONNX-NMS detector output into a list of detection dicts.

        Args:
            output: raw engine output; flattens to rows of
                (x1, y1, x2, y2, conf, class_id) in input-image pixels.
                ONNX NMS pads unused detection slots with zeros.
            conf_threshold: minimum confidence to keep a detection.

        Returns:
            List of dicts with 'class_name', 'class_id', 'confidence' and
            'bbox' as normalized YOLO-format [cx, cy, w, h].
        """
        output = np.array(output)
        if output.size == 0:
            return []

        # Reshape flat buffer into (N, 6) detection rows.
        detections = output.reshape(-1, 6)

        # Hoist loop invariants: input dims and the effective threshold
        # (0.01 floor also drops the zero-padded NMS slots).
        img_h, img_w = IMG_SIZE
        threshold = max(0.01, conf_threshold)

        results = []
        for x1, y1, x2, y2, conf, cls_id in detections:
            if conf < threshold:
                continue

            # Pixel corners -> normalized corners.
            x1n, x2n = x1 / img_w, x2 / img_w
            y1n, y2n = y1 / img_h, y2 / img_h

            results.append({
                "class_name": CLASSES[int(cls_id)],
                "class_id": int(cls_id),
                "confidence": float(conf),
                # Normalized center/size (YOLO format).
                "bbox": [
                    float((x1n + x2n) / 2),
                    float((y1n + y2n) / 2),
                    float(abs(x2n - x1n)),
                    float(abs(y2n - y1n)),
                ],
            })

        print(f"[INFO] Found {len(results)} valid detections")
        return results

    def infer(self, input_data):
        """Run full inference on one BGR image.

        Args:
            input_data: HxWx3 uint8 BGR image (as returned by cv2.imread).

        Returns:
            Post-processed detection list (see postprocess()).
        """
        input_data = self.preprocess(input_data)

        # BUGFIX: bind every I/O tensor's device address — the original only
        # bound inputs[0]/outputs[0], which breaks multi-output engines.
        for tensor in self.inputs + self.outputs:
            self.context.set_tensor_address(tensor['name'], int(tensor['device_memory']))

        # Host -> device copy of the single image tensor.
        cuda.memcpy_htod_async(self.inputs[0]['device_memory'], input_data, self.stream)

        # Enqueue inference (TensorRT 10.x execution API).
        self.context.execute_async_v3(stream_handle=self.stream.handle)

        # Device -> host copies for all outputs.
        for output in self.outputs:
            cuda.memcpy_dtoh_async(output['host_memory'], output['device_memory'], self.stream)

        # Block until all enqueued work on the stream has finished.
        self.stream.synchronize()

        output_data = [out['host_memory'] for out in self.outputs]
        return self.postprocess(output_data)


# # Usage example
# model = TRTInference("best.engine")

# # Assuming you have an image on disk
# image = cv2.imread("../patches_5.jpg")
# outputs = model.infer(image)
# print(outputs)
# # Note: infer() already applies postprocessing (box decoding; NMS is baked into the exported model)