import tensorrt as trt
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit

# ------------------------
# Helper: load a serialized TensorRT engine
# ------------------------
def load_engine(engine_path):
    """Deserialize and return a TensorRT engine from *engine_path*."""
    # ERROR-level logger keeps deserialization quiet unless something fails.
    logger = trt.Logger(trt.Logger.ERROR)
    with open(engine_path, "rb") as engine_file:
        serialized_engine = engine_file.read()
    with trt.Runtime(logger) as runtime:
        return runtime.deserialize_cuda_engine(serialized_engine)

# ------------------------
# Inference helper
# ------------------------
def infer(engine, input_data):
    """Run one synchronous inference pass on *engine*.

    Allocates device buffers for every binding, copies *input_data* to the
    input binding(s), executes the engine, and copies the output(s) back.

    Args:
        engine: a deserialized ``trt.ICudaEngine``.
        input_data: numpy array matching the engine's input shape; it is
            cast to the binding's dtype before upload.

    Returns:
        numpy array holding the first output binding's result.
    """
    context = engine.create_execution_context()
    bindings = []        # device pointers, in binding order, for execute_v2
    allocations = []     # keep device allocations alive until copies finish
    output_hosts = []    # host-side output buffers, in binding order
    output_devs = []     # device allocation paired with each output buffer

    for binding in engine:
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        shape = tuple(context.get_binding_shape(binding))
        # int() avoids passing a numpy scalar to mem_alloc.
        size = int(np.prod(shape))
        device_mem = cuda.mem_alloc(size * np.dtype(dtype).itemsize)
        bindings.append(int(device_mem))
        allocations.append(device_mem)

        if engine.binding_is_input(binding):
            # Cast + make contiguous so the htod copy sees a flat buffer.
            host_mem = np.ascontiguousarray(input_data.astype(dtype))
            cuda.memcpy_htod(device_mem, host_mem)
        else:
            output_hosts.append(np.empty(shape, dtype=dtype))
            output_devs.append(device_mem)

    context.execute_v2(bindings=bindings)

    # BUGFIX: the original zipped the full binding iterator against the
    # outputs-only list, so zip stopped before ever reaching an output
    # binding and memcpy_dtoh never ran — the returned buffer was
    # uninitialized np.empty garbage. Pair each output buffer with its own
    # device allocation instead.
    for host_mem, device_mem in zip(output_hosts, output_devs):
        cuda.memcpy_dtoh(host_mem, device_mem)

    return output_hosts[0]

# ------------------------
# Main logic
# ------------------------
if __name__ == "__main__":
    path_fp32 = "mnist_model_epoch_01_fp32.trt"
    path_fp16 = "mnist_model_epoch_01_fp16.trt"

    print("🚀 加载 TensorRT 引擎中...")
    engine32 = load_engine(path_fp32)
    engine16 = load_engine(path_fp16)
    print("✅ 引擎加载成功！")

    # MNIST input: batch 1, single channel, 28x28 pixels.
    sample = np.random.rand(1, 1, 28, 28).astype(np.float32)

    print("▶️ 开始 FP32 推理...")
    out32 = infer(engine32, sample)

    print("▶️ 开始 FP16 推理...")
    out16 = infer(engine16, sample)

    # Elementwise absolute / relative error between the two precisions;
    # the 1e-6 term guards against division by zero.
    abs_err = np.abs(out32 - out16)
    rel_err = abs_err / (np.abs(out32) + 1e-6)

    print("\n🧩 精度对比结果：")
    print(f"🔹 输出形状: {out32.shape}")
    print(f"🔹 最大绝对误差: {np.max(abs_err):.6f}")
    print(f"🔹 平均绝对误差: {np.mean(abs_err):.6f}")
    print(f"🔹 平均相对误差: {np.mean(rel_err):.6f}")

    verdict = (
        "🎯 FP16 精度与 FP32 基本一致！"
        if np.mean(abs_err) < 1e-3
        else "⚠️ FP16 精度略有偏差，可进一步分析。"
    )
    print(verdict)