
import numpy as np
import cv2
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import onnxruntime as ort

class TRTWrapper:
    """Thin wrapper around a serialized TensorRT engine for batch-1 inference.

    Deserializes the engine once, allocates one device buffer per I/O tensor
    up front, binds them to the execution context, and reuses them across
    `infer` calls. Dynamic (-1) dimensions are pinned to 1 (batch=1 only).
    Assumes the engine has exactly one input tensor.
    """

    def __init__(self, engine_path):
        """Load the engine at `engine_path` and pre-allocate device buffers.

        Args:
            engine_path: path to a serialized TensorRT engine (.engine/.plan).

        Raises:
            RuntimeError: if the engine file cannot be deserialized (e.g.
                corrupt file or TensorRT version mismatch).
        """
        trt_logger = trt.Logger(trt.Logger.ERROR)
        runtime = trt.Runtime(trt_logger)
        with open(engine_path, 'rb') as f:
            engine_data = f.read()
        self.engine = runtime.deserialize_cuda_engine(engine_data)
        # deserialize_cuda_engine returns None on failure; fail fast with a
        # clear message instead of an AttributeError below.
        if self.engine is None:
            raise RuntimeError(f"Failed to deserialize TensorRT engine: {engine_path}")
        self.context = self.engine.create_execution_context()
        self.stream = cuda.Stream()

        def fix_shape(s):
            # Replace dynamic (-1) dims with 1: this wrapper is batch=1 only.
            return tuple((1 if x == -1 else x) for x in s)

        # Single input tensor: record shape/dtype and bind its device buffer.
        self.input_name = [n for n in self.engine
                           if self.engine.get_tensor_mode(n) == trt.TensorIOMode.INPUT][0]
        self.input_shape = fix_shape(self.engine.get_tensor_shape(self.input_name))
        self.input_dtype = trt.nptype(self.engine.get_tensor_dtype(self.input_name))
        self.d_input = cuda.mem_alloc(int(np.prod(self.input_shape) * np.dtype(self.input_dtype).itemsize))
        self.context.set_tensor_address(self.input_name, int(self.d_input))

        # Output tensors: shapes, dtypes, and one device buffer each, bound
        # to the context in engine order.
        self.output_names = [n for n in self.engine
                             if self.engine.get_tensor_mode(n) == trt.TensorIOMode.OUTPUT]
        self.output_shapes = [fix_shape(self.engine.get_tensor_shape(n)) for n in self.output_names]
        self.output_dtypes = [trt.nptype(self.engine.get_tensor_dtype(n)) for n in self.output_names]
        self.d_outputs = [cuda.mem_alloc(int(np.prod(s) * np.dtype(dt).itemsize))
                          for s, dt in zip(self.output_shapes, self.output_dtypes)]
        for n, d_out in zip(self.output_names, self.d_outputs):
            self.context.set_tensor_address(n, int(d_out))

    def infer(self, img_np):
        """Run one inference pass on a preprocessed input array.

        Args:
            img_np: numpy array whose shape must equal the engine's input
                shape (batch=1); it is cast to the engine's input dtype.

        Returns:
            A single numpy array if the engine has one output, otherwise a
            list of numpy arrays in engine output order.

        Raises:
            ValueError: if `img_np` does not match the expected input shape.
            RuntimeError: if TensorRT fails to enqueue the inference.
        """
        h_input = np.ascontiguousarray(img_np.astype(self.input_dtype))
        # Validate with a real exception: `assert` is stripped under -O.
        if tuple(h_input.shape) != self.input_shape:
            raise ValueError(
                f"Input shape mismatch: got {tuple(h_input.shape)}, "
                f"expected {self.input_shape}")
        h_outputs = [np.empty(s, dtype=dt)
                     for s, dt in zip(self.output_shapes, self.output_dtypes)]
        cuda.memcpy_htod_async(self.d_input, h_input, self.stream)
        # execute_async_v3 returns False on enqueue failure; silently
        # ignoring it would return stale/uninitialized output buffers.
        if not self.context.execute_async_v3(self.stream.handle):
            raise RuntimeError("TensorRT execute_async_v3 failed to enqueue")
        for h_out, d_out in zip(h_outputs, self.d_outputs):
            cuda.memcpy_dtoh_async(h_out, d_out, self.stream)
        self.stream.synchronize()
        return h_outputs if len(h_outputs) > 1 else h_outputs[0]