#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# @File   : to_onnx.py
# @Author : yfor
# @Mail   : xxxx@mail.com
# @Date   : 2020/06/24 16:05:08
# @Docs   : 生成onnx文件, 参考了 https://github.com/ultralytics/yolov5 和 https://github.com/TrojanXu/yolov5-tensorrt.git
'''

import os
import argparse
import torch
from onnxsim import simplify
import onnx

import sys
sys.path.append('yolov5')
from yolo import Model

def GiB(val):
    """Convert *val* gibibytes to a byte count.

    NOTE: the previous expression ``val * 1 << 30`` parses as
    ``(val * 1) << 30`` due to operator precedence, which raises
    TypeError for fractional inputs. ``val * (1 << 30)`` produces
    identical results for integers and also supports floats
    (e.g. ``GiB(0.5)`` == 536870912.0).
    """
    return val * (1 << 30)

def load_model(weights='models/yolov5x.pt', model_cfg='models/yolov5x.yaml', device=torch.device('cpu')):
    """Build a YOLOv5 model from *model_cfg* and load compatible weights.

    Args:
        weights: path to a PyTorch checkpoint containing a ``'model'`` entry.
        model_cfg: path to the model configuration YAML.
        device: device the model and weights are mapped onto.

    Returns:
        The model in eval mode with all size-compatible checkpoint tensors loaded.
    """
    model = Model(model_cfg).to(device)
    ckpt = torch.load(weights, map_location=device)
    # Hoist the model state dict: building it once instead of per checkpoint key.
    model_sd = model.state_dict()
    # Keep only checkpoint tensors that exist in the model with a matching element
    # count. The previous code indexed model_sd[k] unconditionally and would raise
    # KeyError on checkpoint keys absent from the model, defeating strict=False.
    ckpt['model'] = {k: v for k, v in ckpt['model'].state_dict().items()
                     if k in model_sd and model_sd[k].numel() == v.numel()}
    model.load_state_dict(ckpt['model'], strict=False)
    model.eval()
    return model

def simplify_onnx(onnx_path):
    """Run onnx-simplifier on the graph at *onnx_path*, overwriting it in place."""
    original_graph = onnx.load(onnx_path)
    simplified_graph, check = simplify(original_graph)
    assert check, "Simplified ONNX model could not be validated"
    onnx.save(simplified_graph, onnx_path)

def export_onnx(model, input_size, onnx_path, batch_size=1, input_name=['input'], output_name=['output'], device=torch.device('cpu')):
    img = torch.zeros((batch_size, *input_size)).to(device)
    torch.onnx.export(model, (img), onnx_path, 
           input_names=input_name, 
           output_names=output_name, 
           verbose=False, 
           opset_version=10, 
           operator_export_type=torch.onnx.OperatorExportTypes.ONNX
    )

def to_onnx(weights, model_cfg, input_size, batch_size=1, input_name=['input'], output_name=['output']):
    """Convert a trained YOLOv5 checkpoint into a simplified, validated ONNX file.

    Tailored to yolov5 only; other architectures may need a different load_model().

    Args:
        weights: str, path to the trained PyTorch weights file.
        model_cfg: str, path to the model configuration file.
        input_size: tuple, (c, h, w) input tensor size.
        batch_size: int, batch dimension baked into the exported graph.
        input_name: list, names assigned to the graph input layers.
        output_name: list, names assigned to the graph output layers.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # splitext returns (stem, extension); tag the output file with the batch size.
    stem = os.path.splitext(weights)[0]
    onnx_path = '{}_b{}.onnx'.format(stem, batch_size)

    net = load_model(weights, model_cfg, device)
    export_onnx(net, input_size, onnx_path, batch_size, input_name, output_name, device)
    simplify_onnx(onnx_path)

    # Reload the exported file, validate it, and show a readable graph dump.
    exported = onnx.load(onnx_path)
    onnx.checker.check_model(exported)
    print(onnx.helper.printable_graph(exported.graph))
    print('Export complete')

def print_help():
    """Print command-line usage.

    The flag spellings now match the argparse definitions in __main__:
    the parser registers ``--batch-size`` / ``--input-name`` / ``--output-name``
    with dashes, so the underscore variants previously shown here were not
    accepted on the command line.
    """
    print('usage:')
    print('    python to_onnx.py --weights yolov5x.pt')
    print('                      --model_cfg yolov5x.yaml')
    print('                      [--img_size 3 640 640]')
    print('                      [--batch-size 16]')
    print('                      [--input-name images]')
    print('                      [--output-name output]')

if __name__ == "__main__":

    # to_onnx('models/yolov5x.pt', 'models/yolov5x.yaml', (3, 640, 512), batch_size=1, input_name=["data"], output_name=["model/22"])
    # model = load_model('models/yolov5x.pt', 'models/yolov5x.yaml', torch.device('cuda:0'))
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov5x.pt', help='weights path')
    parser.add_argument('--model_cfg', type=str, default='./yolov5x.yaml', help='weights path')
    parser.add_argument('--img_size', type=int, nargs='+', default=[3, 640, 640], help='image size')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--input-name', type=str, nargs='+', default=['images'], help='set input names')
    parser.add_argument('--output-name', type=str, nargs='+', default=['output'], help='set output names')
    args = parser.parse_args()
    print(args)

    try:
        weights = args.weights
        model_cfg = args.model_cfg
        input_size = args.img_size
        batch_size = args.batch_size
        input_name = args.input_name
        output_name = args.output_name
        to_onnx(weights, model_cfg, input_size, batch_size, input_name, output_name)
    except:
        print_help()

    # weights = args.weights
    # model_cfg = args.model_cfg
    # input_size = args.img_size
    # batch_size = args.batch_size
    # input_name = args.input_name
    # output_name = args.output_name
    # to_onnx(weights, model_cfg, input_size, batch_size, input_name, output_name)
