#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# @File   : tf_trt.py
# @Author : yuanwenjin
# @Mail   : xxxx@mail.com
# @Date   : 2020/06/28 16:54:43
# @Docs   : 使用tensorflow集成的tensorrt进行优化, 优化后仍为tf.GraphDef()格式文件
'''

import os
import argparse
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt

def save_frozen_graph(frozen_graph, save_name):
    '''
    ### Docs: serialize a frozen TensorFlow graph (tf.GraphDef) to a binary file
    ### Args:
        - frozen_graph: obj, tf.GraphDef to serialize
        - save_name: str, output file path
    ### Returns:
    ### Examples:
    '''

    serialized = frozen_graph.SerializeToString()
    with tf.gfile.GFile(save_name, 'wb') as out_file:
        out_file.write(serialized)

def load_frozen_graph(graph_file):
    '''
    ### Docs: load a serialized frozen graph (tf.GraphDef) from disk
    ### Args:
        - graph_file: str, path to the frozen .pb file
    ### Returns:
        - frozen_graph: obj, the parsed tf.GraphDef
    ### Examples:
    '''

    graph_def = tf.GraphDef()
    with tf.gfile.GFile(graph_file, 'rb') as in_file:
        graph_def.ParseFromString(in_file.read())
    return graph_def

def build_engine(model_file, output_name, fp='FP32', batch_size=16, workspace_size=1<<30):
    '''
    ### Docs: optimize a frozen TensorFlow graph with TensorRT; the result is still a
              tf.GraphDef and is written next to the input file
    ### Args:
        - model_file: str, path to the frozen .pb file produced by TensorFlow
        - output_name: list, names of the model's output nodes
        - fp: str, precision mode, 'FP32', 'FP16' or 'INT8' (must be upper case;
              INT8 requires calibration and is not supported here)
        - batch_size: int, maximum batch size the TensorRT engine will accept
        - workspace_size: int, maximum GPU scratch memory in bytes TensorRT may use
    ### Returns:
        - save_name: str, path of the optimized graph that was written
    '''

    frozen_graph = load_frozen_graph(model_file)

    # Build the TensorRT-optimized graph.
    # NOTE: do not pass output_saved_model_dir here — with a GraphDef input it raises
    # "Not able to save to a SavedModel since input is a GraphDef".
    trt_graph = trt.create_inference_graph(input_graph_def=frozen_graph,
                                           outputs=output_name,
                                           max_batch_size=batch_size,
                                           max_workspace_size_bytes=workspace_size,
                                           precision_mode=fp)

    # Name the output like "<model>_b<batch>_<precision>.pb", e.g. model_b16_fp32.pb.
    # os.path.splitext returns (root, extension).
    save_name = '{}_b{}_{}.pb'.format(os.path.splitext(model_file)[0], batch_size, fp.lower())
    save_frozen_graph(trt_graph, save_name)
    return save_name


def print_help():
    '''Print command-line usage information for this script.
    '''
    usage_lines = (
        'usage:',
        '    python tf_trt.py --model_file frozen_model.pb',
        '                     [--output_name resnet_v2_152/predictions/Softmax]',
        '                     [--fp FP32]',
        '                     [--batch_size 16]',
        '                     [--workspace_size 1<<30]',
    )
    for usage_line in usage_lines:
        print(usage_line)

if __name__ == "__main__":
    
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_file', type=str, default='frozen_model.pb', help='weights path')
    parser.add_argument('--output_name', type=str, nargs='+', default=['resnet_v2_152/predictions/Softmax'], help='set output names')
    parser.add_argument('--fp', type=str, default='FP32', help='precision')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size')
    parser.add_argument('--workspace_size', type=int, default=1<<30, help='maximum GPU temporary memory')
    args = parser.parse_args()

    try:
        model_file = args.model_file
        output_name = args.output_name
        fp = args.fp.upper()
        batch_size = args.batch_size
        workspace_size = args.workspace_size
        build_engine(model_file, output_name, fp, batch_size, workspace_size)
    except:
        print_help()

    # model_file = args.model_file
    # output_name = [ii for ii in args.output_name.split(',') if(ii)]
    # fp = args.fp.upper()
    # batch_size = args.batch_size
    # workspace_size = args.workspace_size
    # build_engine(model_file, output_name, fp, batch_size, workspace_size)
