# encoding=utf-8
import logging
import os
import sys
import tensorrt as trt

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


def convert(onnx_path: str, precision: str, engine_path: str) -> None:
    """Build a serialized TensorRT engine from an ONNX model and write it to disk.

    Args:
        onnx_path: Path to the input ONNX model file.
        precision: One of "fp16", "int8" (assumes a QAT model), or anything
            else for the default FP32 build.
        engine_path: Path where the serialized engine is written.

    Returns early (without writing anything) if the ONNX model fails to parse
    or the engine build fails.
    """
    workspace_gib = 8
    trt_logger = trt.Logger(trt.Logger.INFO)

    builder = trt.Builder(trt_logger)
    config = builder.create_builder_config()
    # `max_workspace_size` is deprecated since TensorRT 8.4 and removed in
    # newer releases; use the memory-pool API when it exists.
    workspace_bytes = workspace_gib * (1 << 30)
    if hasattr(config, "set_memory_pool_limit"):
        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace_bytes)
    else:
        config.max_workspace_size = workspace_bytes

    # The ONNX parser requires an explicit-batch network.
    network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(network_flags)
    parser = trt.OnnxParser(network, trt_logger)

    with open(onnx_path, 'rb') as f:
        if not parser.parse(f.read()):
            # Surface the parser diagnostics instead of failing silently.
            for i in range(parser.num_errors):
                logger.error("ONNX parse error: %s", parser.get_error(i))
            return

    # Log the network's input signature for debugging.
    logger.info("Network Description")
    for i in range(network.num_inputs):
        inp = network.get_input(i)
        logger.info("Input '{}' with shape {} and dtype {}".format(inp.name, inp.shape, inp.dtype))

    # Enable reduced precision only when the device supports it natively.
    if precision == "fp16":
        if not builder.platform_has_fast_fp16:
            logger.warning("FP16 is not supported natively on this platform/device")
        else:
            config.set_flag(trt.BuilderFlag.FP16)
    elif precision == "int8":
        if not builder.platform_has_fast_int8:
            logger.warning("INT8 is not supported natively on this platform/device")
        else:
            # NOTE: no calibrator is configured, so INT8 assumes a
            # quantization-aware-trained (QAT) model with embedded Q/DQ nodes.
            print("Exporting a QAT model...")
            config.set_flag(trt.BuilderFlag.INT8)

    engine_bytes = builder.build_serialized_network(network, config)
    if engine_bytes is None:
        # Previously this would crash with a TypeError on f.write(None).
        logger.error("Engine build failed; no engine written")
        return
    with open(engine_path, 'wb') as f:
        f.write(engine_bytes)

    print("done")


if __name__ == '__main__':
    # Example invocation: build an FP16 engine from the UNet ONNX export.
    convert(
        onnx_path="../UnetOnnx/unet-output.onnx",
        precision="fp16",
        engine_path="unet-engine.trt",
    )