import argparse
import os
import time

import pycuda.autoinit  # noqa: F401 -- imported for its side effect: creates the CUDA context
import pycuda.driver as cuda
import tensorrt as trt

class TensorRTExporter:
    """Build a TensorRT engine from an ONNX model and time a single inference.

    NOTE(review): requires the ``tensorrt`` and ``pycuda`` packages and a
    CUDA-capable GPU at runtime; nothing here is importable without them.
    """

    def __init__(self, onnx_file_path, engine_file_path=None, precision='fp32'):
        """Set up builder, network, parser and config for the given ONNX file.

        Args:
            onnx_file_path: Path to the source ONNX model.
            engine_file_path: Where to write the serialized engine. Defaults
                to the ONNX path with its extension swapped for ``.trt``.
            precision: One of ``'fp32'``, ``'fp16'``, ``'int8'``
                (case-insensitive).
        """
        self.onnx_file_path = onnx_file_path
        if engine_file_path is None:
            # splitext is robust when the path does not literally end in
            # '.onnx' (str.replace could miss, or hit a mid-path substring).
            root, _ = os.path.splitext(onnx_file_path)
            engine_file_path = root + '.trt'
        self.engine_file_path = engine_file_path
        self.precision = precision.lower()
        self.logger = trt.Logger(trt.Logger.WARNING)
        self.builder = trt.Builder(self.logger)
        # Explicit-batch network definition is mandatory for the ONNX parser.
        self.network = self.builder.create_network(
            1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        self.parser = trt.OnnxParser(self.network, self.logger)
        self.config = self.builder.create_builder_config()

    def export(self, input_shape=None):
        """Parse the ONNX model, build the engine and write it to disk.

        Args:
            input_shape: Optional full input shape ``(batch, ...)`` used to
                create an optimization profile (needed for dynamic-shape
                models). Batch is profiled from 1 up to at least 4.

        Returns:
            True on success, False if parsing or building failed.
        """
        # Parse the ONNX model.
        with open(self.onnx_file_path, 'rb') as model_file:
            if not self.parser.parse(model_file.read()):
                print('ERROR: Failed to parse the ONNX file.')
                for error in range(self.parser.num_errors):
                    print(self.parser.get_error(error))
                return False

        # Precision flags. Warn instead of silently falling back to FP32 so
        # the caller is not surprised by the resulting engine's precision.
        if self.precision == 'fp16':
            if self.builder.platform_has_fast_fp16:
                self.config.set_flag(trt.BuilderFlag.FP16)
            else:
                print('WARNING: fast FP16 not supported on this platform; building FP32.')
        elif self.precision == 'int8':
            if self.builder.platform_has_fast_int8:
                self.config.set_flag(trt.BuilderFlag.INT8)
                # TODO: attach an INT8 calibrator here; without one the build
                # will fail (or quantize blindly) for most networks.
            else:
                print('WARNING: fast INT8 not supported on this platform; building FP32.')

        # Workspace limit: 1 GiB. `max_workspace_size` was deprecated in
        # TensorRT 8.4 and removed in 10.0; prefer the memory-pool API when
        # available so both old and new releases work.
        if hasattr(self.config, 'set_memory_pool_limit'):
            self.config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)
        else:
            self.config.max_workspace_size = 1 << 30

        # Optimization profile for the (single) network input, if a shape
        # was supplied.
        if input_shape:
            profile = self.builder.create_optimization_profile()
            input_tensor = self.network.get_input(0)
            min_shape = (1,) + tuple(input_shape[1:])
            opt_shape = tuple(input_shape)
            # max must be >= opt or TensorRT rejects the profile; the old
            # hard-coded 4 broke any requested batch size above 4.
            max_shape = (max(4, input_shape[0]),) + tuple(input_shape[1:])
            profile.set_shape(input_tensor.name, min_shape, opt_shape, max_shape)
            self.config.add_optimization_profile(profile)

        # Build and persist the serialized engine.
        serialized_engine = self.builder.build_serialized_network(self.network, self.config)
        if serialized_engine is None:
            print('ERROR: Failed to build TensorRT engine.')
            return False

        with open(self.engine_file_path, 'wb') as f:
            f.write(serialized_engine)

        print(f'TensorRT engine saved to {self.engine_file_path}')
        return True

    def get_inference_time(self, warmup=3, iterations=10):
        """Measure the average inference latency of the saved engine.

        Args:
            warmup: Untimed runs executed first so lazy CUDA initialization
                and JIT compilation are excluded from the measurement.
            iterations: Timed runs to average over (a single run, as before,
                is dominated by launch noise).

        Returns:
            Average latency in milliseconds, or None if the engine file is
            missing.

        NOTE(review): input buffers are never filled with real data, so this
        times the execution path with whatever the freshly allocated buffers
        contain -- fine for latency, not for validating outputs.
        """
        if not os.path.exists(self.engine_file_path):
            print('ERROR: TensorRT engine file not found.')
            return None

        # Deserialize the engine and create an execution context.
        runtime = trt.Runtime(self.logger)
        with open(self.engine_file_path, 'rb') as f:
            engine = runtime.deserialize_cuda_engine(f.read())
        context = engine.create_execution_context()

        inputs, outputs, bindings, stream = self.allocate_buffers(engine)

        # Warm-up launches (untimed).
        for _ in range(warmup):
            context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
        stream.synchronize()

        # perf_counter is monotonic and high-resolution, unlike time.time.
        start = time.perf_counter()
        for _ in range(iterations):
            context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
        # Synchronize the stream we launched into, not the whole CUDA context.
        stream.synchronize()
        inference_time = (time.perf_counter() - start) * 1000.0 / iterations  # ms

        print(f'Inference time: {inference_time:.2f} ms')
        return inference_time

    def allocate_buffers(self, engine):
        """Allocate page-locked host and device buffers for every binding.

        Returns:
            ``(inputs, outputs, bindings, stream)`` where inputs/outputs are
            lists of ``{'host': ..., 'device': ...}`` dicts, bindings is the
            list of device pointers in binding order, and stream is a fresh
            CUDA stream.
        """
        inputs = []
        outputs = []
        bindings = []
        stream = cuda.Stream()

        for binding in engine:
            shape = engine.get_binding_shape(binding)
            # Dynamic dimensions are reported as -1; size them at 1 so the
            # allocation succeeds (trt.volume of a shape with -1 is negative
            # and would crash pagelocked_empty). NOTE(review): for truly
            # dynamic engines the caller should instead allocate from the
            # execution context's resolved shape.
            size = trt.volume([dim if dim > 0 else 1 for dim in shape])
            # `engine.max_batch_size` is intentionally NOT multiplied in: it
            # is always 1 for the explicit-batch engines built above, and the
            # attribute was removed in TensorRT 10.
            dtype = trt.nptype(engine.get_binding_dtype(binding))
            # Page-locked host buffer plus matching device buffer.
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            # Device pointer goes into the bindings list in binding order.
            bindings.append(int(device_mem))
            buf = {'host': host_mem, 'device': device_mem}
            if engine.binding_is_input(binding):
                inputs.append(buf)
            else:
                outputs.append(buf)
        return inputs, outputs, bindings, stream


def export_tensorrt(onnx_file_path, input_shape=(1, 3, 640, 640), precision='fp16'):
    """Export an ONNX model to a TensorRT engine and report its latency.

    Args:
        onnx_file_path: Path to the ONNX model to convert.
        input_shape: Full input shape used for the optimization profile.
        precision: Precision mode passed through to the exporter.

    Returns:
        True if the engine was built and saved, False otherwise.
    """
    exporter = TensorRTExporter(onnx_file_path, precision=precision)
    if not exporter.export(input_shape):
        return False
    # Only time the engine once it has actually been written to disk.
    exporter.get_inference_time()
    return True

if __name__ == '__main__':
    # Command-line entry point: convert an ONNX model and print its latency.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--onnx_file', type=str, required=True,
                            help='Path to ONNX file')
    arg_parser.add_argument('--precision', type=str, default='fp16',
                            choices=['fp32', 'fp16', 'int8'],
                            help='Precision mode')
    arg_parser.add_argument('--batch_size', type=int, default=1,
                            help='Batch size for optimization profile')
    cli_args = arg_parser.parse_args()

    # Fixed 3x640x640 input; only the batch dimension is configurable here.
    export_tensorrt(cli_args.onnx_file,
                    (cli_args.batch_size, 3, 640, 640),
                    cli_args.precision)