import tensorrt as trt
import numpy as np
import fnmatch


def build_engine(onnx_path, trt_path, fp32_layers=('xxxx',), precision="FP16"):
    """Build a TensorRT engine from an ONNX model, pinning selected layers to FP32.

    Args:
        onnx_path: Path to the input ONNX model file.
        trt_path: Path where the serialized engine bytes are written.
        fp32_layers: Substrings matched case-insensitively against layer
            names; any matching layer (and its input/output tensors) is
            forced to FP32 even in an FP16 build.
        precision: "FP16" enables the global FP16 builder flag; any other
            value leaves the builder at its default precision.

    Returns:
        The serialized engine bytes that were written to ``trt_path``.

    Raises:
        RuntimeError: If ONNX parsing or engine building fails.
    """
    logger = trt.Logger(trt.Logger.WARNING)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    parser = trt.OnnxParser(network, logger)

    # Load and parse the ONNX model.
    with open(onnx_path, "rb") as f:
        if not parser.parse(f.read()):
            for i in range(parser.num_errors):
                print(f"ONNX Parser Error: {parser.get_error(i)}")
            # Raise instead of exit() so callers can handle the failure.
            raise RuntimeError(f"Failed to parse ONNX model: {onnx_path}")

    # Configure the builder (optional global FP16).
    config = builder.create_builder_config()
    if precision == "FP16":
        config.set_flag(trt.BuilderFlag.FP16)
        # NOTE(review): without OBEY/PREFER_PRECISION_CONSTRAINTS the builder
        # may ignore the per-layer precisions set below — confirm against your
        # TensorRT version's documentation.

    # Walk all layers and pin matching ones to FP32.
    for layer in network:
        # Hoist the lowercase conversion out of the inner pattern loop.
        layer_name = layer.name.lower()
        for fp32_layer in fp32_layers:
            if fp32_layer in layer_name:
                print(f"Setting FP32 precision for {fp32_layer} layer: {layer.name}")

                # Set the layer's compute precision to FP32.
                layer.precision = trt.DataType.FLOAT

                # Force the layer's input/output tensors to FP32 as well.
                for i in range(layer.num_inputs):
                    layer.get_input(i).dtype = trt.DataType.FLOAT
                for i in range(layer.num_outputs):
                    layer.get_output(i).dtype = trt.DataType.FLOAT

                # Layer is already pinned; no need to test remaining patterns.
                break

    # Build the engine, supporting both the new (serialized) and legacy APIs.
    if hasattr(builder, "build_serialized_network"):
        engine = builder.build_serialized_network(network, config)
    else:
        # Legacy API returns an ICudaEngine; it must be serialized before it
        # can be written to a file (f.write() on the raw object would fail).
        cuda_engine = builder.build_cuda_engine(network)
        engine = None if cuda_engine is None else cuda_engine.serialize()

    if engine is None:
        raise RuntimeError("TensorRT engine build failed")

    with open(trt_path, "wb") as f:
        f.write(engine)

    return engine




# Example usage
if __name__ == "__main__":
    # Convert the simplified ONNX export to a TensorRT engine.
    source_onnx = "/home/adt/codes/python/StreamPETR/experiments/eog_drive_v2-250901/iter_410000_new_simplify.onnx"
    target_engine = "/home/adt/codes/python/StreamPETR/experiments/eog_drive_v2-250901/iter_410000_new_simplify.trt"

    # The softmax layers inside self-attention are kept in FP32 (focal
    # export). These layer names must be rewritten whenever the ONNX export
    # changes! Previously used alternatives:
    #   ['softmax_474', 'softmax_575', 'softmax_676', 'softmax_777', 'softmax_878', 'softmax_979']
    #   ['softmax_471', 'softmax_572', 'softmax_673', 'softmax_774', 'softmax_875', 'softmax_976']
    #   ['softmax_515', 'softmax_616', 'softmax_717', 'softmax_818', 'softmax_919', 'softmax_1020']
    #   ['softmax']  — pin every softmax to FP32
    attention_softmax_layers = ['softmax_528', 'softmax_638', 'softmax_748',
                                'softmax_858', 'softmax_968', 'softmax_1078']

    build_engine(
        onnx_path=source_onnx,
        trt_path=target_engine,
        fp32_layers=attention_softmax_layers,
        precision="FP16",
    )