#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# @File   : tf_2_trt.py
# @Author : yfor
# @Mail   : xxxx@mail.com
# @Date   : 2020/06/24 13:49:34
# @Docs   : 生成tensorRT可以使用的engine, 以slim中的resnet_v2_152为例
'''

import os
import sys
import tensorrt as trt
import uff
import argparse
from trt_utils import save_engine, TRT_LOGGER

def build_engine(model_file, input_name, input_size, output_name, fp='fp32', batch_size=16):
    '''Build a TensorRT engine from a TensorFlow frozen graph and save it to disk.

    ### Args:
        - model_file: str, path to the TensorFlow frozen .pb model file
        - input_name: str, name of the model's input layer
        - input_size: tuple, (c, w, h) input tensor size
        - output_name: list, names of the model's output layers
        - fp: str, precision: 'fp32', 'fp16' or 'int8'. int8 requires a
          calibrator, which is not wired up here, so int8 mode is enabled
          without calibration (TODO confirm downstream accuracy).
        - batch_size: int, max batch size baked into the engine

    ### Raises:
        - ValueError: if fp is not one of 'fp32', 'fp16', 'int8'
        - RuntimeError: if parsing the model or building the engine fails
    '''
    fp = fp.lower()
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = 1 << 30  # 1 GiB scratch space for tactic selection
        builder.max_batch_size = batch_size

        # Enable reduced precision only when the platform actually supports it;
        # warn instead of silently falling back so the user knows what they got.
        if fp == 'fp16':
            if builder.platform_has_fast_fp16:
                builder.fp16_mode = True
            else:
                print('Warning: platform has no fast fp16 support, building in fp32')
        elif fp == 'int8':
            if builder.platform_has_fast_int8:
                builder.int8_mode = True
            else:
                print('Warning: platform has no fast int8 support, building in fp32')
        elif fp != 'fp32':
            # Previously an unknown value fell through silently as fp32.
            raise ValueError("fp must be 'fp32', 'fp16' or 'int8', got: {}".format(fp))

        parser.register_input(input_name, input_size)
        for out_layer in output_name:
            parser.register_output(out_layer)

        # Convert the frozen graph to UFF in memory and parse it into the network.
        # parse_buffer returns False on failure; the old code ignored it and
        # went on to build a broken engine.
        if not parser.parse_buffer(uff.from_tensorflow_frozen_model(model_file), network):
            raise RuntimeError('Failed to parse UFF model converted from: {}'.format(model_file))

        print("Building TensorRT engine, this may take a few minutes...")
        trt_engine = builder.build_cuda_engine(network)
        if trt_engine is None:
            # build_cuda_engine returns None on failure rather than raising.
            raise RuntimeError('Failed to build TensorRT engine from: {}'.format(model_file))

        # os.path.splitext returns (stem, suffix); encode batch size and
        # precision into the engine filename for later identification.
        engine_path = '{}_b{}_{}.engine'.format(os.path.splitext(model_file)[0], batch_size, fp)
        save_engine(trt_engine, engine_path)

def print_help():
    '''Print the command-line usage of this script.'''
    usage = (
        'usage:',
        '    python tf_2_trt.py --model_file frozen_model.pb',
        '                       [--input_name data]',
        '                       [--input_size 3 256 256]',
        '                       [--output_name resnet_v2_152/predictions/Softmax]',
        '                       [--fp fp32]',
        '                       [--batch_size 16]',
    )
    print('\n'.join(usage))

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('--model_file', type=str, default='frozen_model.pb', help='weights path')
    parser.add_argument('--input_name', type=str, default='data', help='set input names')
    parser.add_argument('--input_size', type=int, nargs='+', default=(3,256,256), help='input image size(c,h,w)')
    parser.add_argument('--output_name', type=str, nargs='+', default=['resnet_v2_152/predictions/Softmax'], help='set output names')
    parser.add_argument('--fp', type=str, default='fp32', help='precision')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size')
    args = parser.parse_args()

    try:
        build_engine(args.model_file, args.input_name, args.input_size,
                     args.output_name, args.fp, args.batch_size)
    except Exception as err:
        # Was a bare `except:` that swallowed every error (including
        # KeyboardInterrupt) and printed only the help text — report the
        # actual failure so it can be diagnosed, then exit non-zero.
        print('Error: {}'.format(err))
        print_help()
        sys.exit(1)