#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# @File   : onnx_2_trt_1.py
# @Author : yfor
# @Mail   : xxxx@mail.com
# @Date   : 2020/06/24 15:26:36
# @Docs   : 生成tensorRT可以使用的engine, 使用onnx格式
'''

import os
import argparse
import tensorrt as trt
from trt_utils import save_engine, TRT_LOGGER
# Bit flag selecting an explicit-batch network definition — required by the
# ONNX parser in TensorRT 7+ (batch dimension comes from the model itself).
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def GiB(val):
    '''Convert a count of gibibytes to bytes (val * 2**30).'''
    return val * (1 << 30)

def build_engine(onnx_file, fp='fp32', batch_size=1):
    '''
    ### Docs: Build a TensorRT engine from an ONNX model and save it next to
              the source file as "<model_name>_<precision>.engine".
    ### Args:
        - onnx_file: str, path to the ONNX model file
        - fp: str, precision: 'fp32', 'fp16' or 'int8' (case-insensitive).
              int8 requires calibration, which is not provided here.
        - batch_size: int, batch size (ignored by explicit-batch networks)
    ### Returns:
        - the built engine on success, None if parsing or building failed
    ### Raises:
        - ValueError: if `fp` is not one of the supported precision modes
    '''
    precision = fp.lower()
    # Validate the precision up front, before constructing any TRT objects.
    if precision not in ('fp32', 'fp16', 'int8'):
        raise ValueError(("precision mode '{}' is not supported. It should be one of {}").format(fp, ['fp32', 'fp16', 'int8']))

    with trt.Builder(TRT_LOGGER) as builder, \
            builder.create_network(EXPLICIT_BATCH) as network, \
            trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_batch_size = batch_size # always 1 for explicit batch
        config = builder.create_builder_config()
        config.max_workspace_size = GiB(1)
        if precision == 'fp16':
            config.set_flag(trt.BuilderFlag.FP16)
        elif precision == 'int8':
            # NOTE(review): INT8 normally needs a calibrator; without one the
            # resulting engine may be inaccurate — confirm before using int8.
            config.set_flag(trt.BuilderFlag.INT8)

        with open(onnx_file, 'rb') as model:
            if not parser.parse(model.read()):
                print ('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    print (parser.get_error(error))
                return None

        print("Building TensorRT engine, this may take a few minutes...")
        trt_engine = builder.build_engine(network, config)
        if trt_engine is None:
            # build_engine returns None on failure; don't try to serialize it.
            print('ERROR: Failed to build the TensorRT engine.')
            return None

        # os.path.splitext returns (root, extension); keep the model's basename.
        engine_path = '{}_{}.engine'.format(os.path.splitext(onnx_file)[0], precision)
        save_engine(trt_engine, engine_path)
        return trt_engine

def print_help():
    '''Print command-line usage examples for this script.
    '''
    usage_lines = [
        'usage:',
        '    python onnx_2_trt_1.py --onnx_file yolov5x.onnx',
        '                           [--fp fp32]',
        '                           [--batch_size 16]',
    ]
    print('\n'.join(usage_lines))

if __name__ == "__main__":

    # Parse command-line options; argparse exits by itself on malformed input.
    parser = argparse.ArgumentParser()
    parser.add_argument('--onnx_file', type=str, default='frozen_model.onnx', help='onnx file')
    parser.add_argument('--fp', type=str, default='FP32', help='precision')
    parser.add_argument('--batch_size', type=int, default=1, help='batch size')
    args = parser.parse_args()
    print(args)

    try:
        build_engine(args.onnx_file, args.fp, args.batch_size)
    except Exception as err:
        # Report the actual failure instead of silently swallowing it with a
        # bare except, then show usage as a hint.
        print('ERROR: {}'.format(err))
        print_help()
