import pycuda.autoinit     
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
import os

TRT_LOGGER = trt.Logger(trt.Logger.INFO)

class MyEntropyCalibrator(trt.IInt8EntropyCalibrator2):
    """INT8 entropy calibrator that streams batches from a PyTorch-style dataloader.

    Feeds float32 batches of a fixed shape to TensorRT through a single
    reusable device buffer.
    """

    def __init__(self, dataloader, input_shape):
        super().__init__()
        self.dataloader = iter(dataloader)
        self.input_shape = input_shape
        # Batch size is the leading dimension of the calibration input shape.
        self.batch_size = input_shape[0]
        # One device buffer, reused for every calibration batch (float32 assumed).
        nbytes = trt.volume(input_shape) * np.dtype(np.float32).itemsize
        self.device_input = cuda.mem_alloc(nbytes)
        self.current_batch = 0

    def get_batch(self, names):
        """Copy the next batch to the GPU; return None when the loader is exhausted."""
        try:
            host_batch = next(self.dataloader)[0].numpy()  # [N, C, H, W]
        except StopIteration:
            # Signals TensorRT that calibration data is exhausted.
            return None
        cuda.memcpy_htod(self.device_input, host_batch)
        self.current_batch += 1
        # TensorRT expects a list of device pointers, one per input tensor.
        return [int(self.device_input)]

    def get_batch_size(self):
        return self.batch_size

    def read_calibration_cache(self):
        # No on-disk cache: always calibrate from data.
        return None

    def write_calibration_cache(self, cache):
        # Cache persistence intentionally disabled.
        return None

def build_engine(onnx_file_path, engine_file_path=None, fp16_mode=False, int8_mode=False, calib_data=None):
    """Build (and optionally save) a TensorRT engine from an ONNX model.

    Args:
        onnx_file_path:   path to the ONNX model to parse.
        engine_file_path: if given, the serialized engine is also written here.
        fp16_mode:        enable FP16 kernels when the platform supports them.
        int8_mode:        enable INT8 kernels when the platform supports them;
                          requires ``calib_data``.
        calib_data:       an IInt8Calibrator instance used for INT8 calibration.

    Returns:
        A deserialized ICudaEngine.

    Raises:
        RuntimeError: if ONNX parsing or engine building fails.
        ValueError:   if int8_mode is requested without a calibrator.
    """
    # Reuse the module-level TRT_LOGGER instead of shadowing it locally.
    builder = trt.Builder(TRT_LOGGER)
    network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(network_flags)
    parser = trt.OnnxParser(network, TRT_LOGGER)
    config = builder.create_builder_config()
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)  # 1 GiB workspace cap

    # Parse the ONNX model.
    with open(onnx_file_path, 'rb') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            raise RuntimeError("Failed to parse ONNX model!")

    # FP16 quantization.
    if fp16_mode and builder.platform_has_fast_fp16:
        config.set_flag(trt.BuilderFlag.FP16)

    # INT8 quantization (needs a calibrator).
    if int8_mode and builder.platform_has_fast_int8:
        if calib_data is None:
            raise ValueError("INT8模式需要提供校准器(calib_data参数不能为空)!")
        config.set_flag(trt.BuilderFlag.INT8)
        config.int8_calibrator = calib_data
    elif int8_mode:
        print("警告:本设备暂不支持INT8加速,跳过INT8配置!")

    # Build, deserialize, and optionally persist the engine.
    serialized_engine = builder.build_serialized_network(network, config)
    if serialized_engine is None:
        raise RuntimeError("Failed to build serialized engine! Please check your ONNX model and TensorRT logs.")
    runtime = trt.Runtime(TRT_LOGGER)
    engine = runtime.deserialize_cuda_engine(serialized_engine)
    if engine_file_path is not None:
        with open(engine_file_path, "wb") as f:
            f.write(serialized_engine)
    return engine

def load_engine(engine_file):
    """Read a serialized TensorRT engine file and return the deserialized engine."""
    with open(engine_file, "rb") as f:
        blob = f.read()
    runtime = trt.Runtime(TRT_LOGGER)
    return runtime.deserialize_cuda_engine(blob)

import numpy as np
import pycuda.driver as cuda
import tensorrt as trt

def infer(engine, input_data):
    """Run one synchronous inference on *engine* and return its first output.

    Args:
        engine:     a deserialized TensorRT ICudaEngine; assumes exactly one
                    input and one output tensor with static shapes — TODO
                    confirm for dynamic-shape engines.
        input_data: numpy array; cast to the engine's input dtype before copy.

    Returns:
        numpy array reshaped to the engine's output shape.
    """
    # 1. Collect I/O tensor names and split by direction.
    names = [engine.get_tensor_name(i) for i in range(engine.num_io_tensors)]
    in_name = next(n for n in names
                   if engine.get_tensor_mode(n) == trt.TensorIOMode.INPUT)
    out_name = next(n for n in names
                    if engine.get_tensor_mode(n) == trt.TensorIOMode.OUTPUT)

    # 2. Query shapes and numpy dtypes from the engine.
    in_shape = engine.get_tensor_shape(in_name)
    out_shape = engine.get_tensor_shape(out_name)
    in_dtype = trt.nptype(engine.get_tensor_dtype(in_name))
    out_dtype = trt.nptype(engine.get_tensor_dtype(out_name))

    # 3. Allocate host and device buffers.
    h_input = np.ascontiguousarray(input_data.astype(in_dtype))
    h_output = np.empty(trt.volume(out_shape), dtype=out_dtype)
    d_input = cuda.mem_alloc(h_input.nbytes)
    d_output = cuda.mem_alloc(h_output.nbytes)

    # 4. Execution context and CUDA stream.
    context = engine.create_execution_context()
    stream = cuda.Stream()

    # 5. Bind device addresses to tensor names (required by execute_async_v3).
    context.set_tensor_address(in_name, int(d_input))
    context.set_tensor_address(out_name, int(d_output))

    # 6. H2D copy, launch, D2H copy, then wait for the stream to drain.
    cuda.memcpy_htod_async(d_input, h_input, stream)
    context.execute_async_v3(stream.handle)  # v3 API takes only the stream handle
    cuda.memcpy_dtoh_async(h_output, d_output, stream)
    stream.synchronize()
    return h_output.reshape(out_shape)

import tensorrt as trt
import json
import os

def load_scales_from_ppq_json(json_file):
    """Parse a PPQ-exported JSON file into a ``{tensor_name: scale}`` mapping.

    Each top-level entry is expected to be a dict containing at least a
    ``'scale'`` key; all other per-tensor fields are ignored.
    """
    with open(json_file, 'r') as f:
        table = json.load(f)
    scales = {}
    for tensor_name, entry in table.items():
        scales[tensor_name] = entry['scale']
    return scales

class PPQJsonCalibrator(trt.IInt8EntropyCalibrator2):
    """Cache-only calibrator that injects precomputed PPQ scales into TensorRT.

    Instead of running real calibration batches, this calibrator serves a
    synthesized calibration cache built from a ``{tensor_name: scale}`` dict,
    so TensorRT skips data-driven calibration entirely.
    """

    def __init__(self, scale_dict):
        super().__init__()
        self.scale_dict = scale_dict

    def get_batch_size(self):
        return 1

    def get_batch(self, names):
        # Returning None immediately tells TensorRT there is no calibration
        # data — it must rely on read_calibration_cache() instead.
        return None

    def read_calibration_cache(self):
        """Serialize the PPQ scales in TensorRT's calibration-cache format.

        TensorRT expects a version header line followed by
        ``<tensor_name>: <hex>`` lines, where <hex> is the big-endian
        IEEE-754 bit pattern of the float32 scale — NOT its decimal text
        (decimal scales would not be parsed back as the intended values).
        """
        import struct
        # NOTE(review): TensorRT checks the header against its own version
        # string; derived from trt.__version__ here — confirm it matches the
        # local build if the cache is rejected.
        version_tag = trt.__version__.replace('.', '')
        cache_lines = [f"TRT-{version_tag}-EntropyCalibration2\n"]
        for name, scale in self.scale_dict.items():
            hex_bits = struct.pack('>f', float(scale)).hex()
            cache_lines.append(f"{name}: {hex_bits}\n")
        return ''.join(cache_lines).encode('utf-8')

    def write_calibration_cache(self, cache):
        # The cache is generated from the PPQ JSON; nothing to persist.
        pass

def build_trt_engine_with_ppq_json(
        onnx_path: str,
        ppq_json_path: str,
        engine_path: str,
        fp16: bool = False,
        int8: bool = True,
        workspace: int = 1 << 30):
    """Build an INT8 TensorRT engine from ONNX using a PPQ-exported scale table.

    Args:
        onnx_path:     path to the ONNX model.
        ppq_json_path: path to the PPQ-exported JSON scale table.
        engine_path:   output path for the serialized TRT engine.
        fp16:          also allow FP16 mixed precision (default False).
        int8:          enable INT8 quantization (default True).
        workspace:     workspace memory pool limit in bytes (default 1 GiB).

    Raises:
        RuntimeError: if ONNX parsing or engine building fails.
    """
    logger = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(logger)
    network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(network_flags)
    parser = trt.OnnxParser(network, logger)
    with open(onnx_path, 'rb') as f:
        if not parser.parse(f.read()):
            print("Failed to parse ONNX model!")
            for idx in range(parser.num_errors):
                print(parser.get_error(idx))
            raise RuntimeError('ONNX parsing failed.')

    config = builder.create_builder_config()
    # set_memory_pool_limit replaces the deprecated max_workspace_size
    # attribute (removed in TensorRT 10); consistent with build_engine().
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)

    # Optional precision flags.
    if fp16:
        config.set_flag(trt.BuilderFlag.FP16)
    if int8:
        config.set_flag(trt.BuilderFlag.INT8)
        # Feed the PPQ scales to TensorRT via the calibration-cache path.
        scales = load_scales_from_ppq_json(ppq_json_path)
        config.int8_calibrator = PPQJsonCalibrator(scales)

    # build_serialized_network replaces builder.build_engine, which was
    # removed in TensorRT 10; consistent with build_engine() above.
    serialized = builder.build_serialized_network(network, config)
    if serialized is None:
        raise RuntimeError("Failed to build engine")
    with open(engine_path, 'wb') as f:
        f.write(serialized)
    print(f"Engine exported to {engine_path}")