# encoding=utf-8
# https://github.com/NVIDIA/TensorRT/blob/main/samples/python/introductory_parser_samples/onnx_resnet50.py
# https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/gettingStarted.html
# https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/infer/Core/BuilderConfig.html?highlight=memorypooltype

# NOTE: the TensorRT 10.x Python API differs from 8.x; engines must be rebuilt with the 10.x builder below.
import tensorrt as trt

print("trt version: ", trt.__version__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)

# TensorRT 10.x build workflow: builder -> network -> config -> ONNX parser.
builder = trt.Builder(TRT_LOGGER)
# Flag value 0: default network creation flags (explicit batch is the
# default in TensorRT 10.x, so no flag bits are needed here).
network = builder.create_network(0)
config = builder.create_builder_config()
parser = trt.OnnxParser(network, TRT_LOGGER)

# Build configuration: cap the tactic workspace memory pool at 1 GiB.
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)  # 1 GiB
# config.set_flag(trt.BuilderFlag.FP16)  # optionally enable FP16 kernels

# Parse the ONNX model; on failure, dump all parser errors and exit.
with open("export_dense121_cpu.onnx", 'rb') as model:
    if not parser.parse(model.read()):
        print("ERROR: Failed to parse the ONNX file.")
        for error in range(parser.num_errors):
            print(parser.get_error(error))
        # raise SystemExit instead of exit(): the exit() builtin comes from
        # the site module and is unavailable under `python -S` / frozen apps.
        raise SystemExit(-1)

# build_serialized_network returns None on failure (it does not raise),
# so check before writing — otherwise we'd write None and get a TypeError.
engine_bytes = builder.build_serialized_network(network, config)
if engine_bytes is None:
    print("ERROR: Failed to build the serialized engine.")
    raise SystemExit(-1)

with open("export_dense121_gpu.engine", 'wb') as engine:
    engine.write(engine_bytes)


