import torch
from collections import OrderedDict
import sys
import os
import re
import codecs
import json
import copy
import torchsummary

import torchvision.models as models


def convert_torch_model_parameters_to_onnx(model_filename: str) -> None:
    """Export a ResNet-50 weight checkpoint (state_dict) to ONNX.

    The file at *model_filename* is expected to contain only the weights of a
    torchvision ResNet-50 whose final ``fc`` layer was replaced by a 4-class
    head. The traced graph is written next to the input file as
    ``"<model_filename>.test.onnx"``.

    :param model_filename: path to the saved ``state_dict`` checkpoint
    """
    # map_location makes a GPU-saved checkpoint loadable on a CPU-only machine
    # (same pattern as convert_torch_model_to_onnx below).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    state_dict = torch.load(model_filename, map_location=device)

    # Rebuild the model structure and load the saved parameters into it.
    model = models.resnet50()
    model.fc = torch.nn.Linear(2048, 4)  # 4-class classification head
    model.load_state_dict(state_dict)

    # Inference mode: freezes dropout / batch-norm for a deterministic export.
    model.eval()

    # Make a dummy input for tracing.
    batch_size = 1
    input_shape = (3, 244, 384)  # (C, H, W) the export graph is traced with

    x = torch.randn(batch_size, *input_shape)  # dummy tracing tensor
    export_onnx_file = "{}.test.onnx".format(model_filename)  # output file name
    torch.onnx.export(model,
                      x,
                      export_onnx_file,
                      opset_version=10,
                      do_constant_folding=True,  # fold constant subgraphs
                      input_names=["input"],
                      output_names=["output"],
                      # keep the batch dimension dynamic in the exported graph
                      dynamic_axes={"input": {0: "batch_size"},
                                    "output": {0: "batch_size"}})


def convert_torch_model_to_onnx(model_filename: str) -> None:
    """Export a fully pickled PyTorch model to ``"<model_filename>.onnx"``.

    Unlike :func:`convert_torch_model_parameters_to_onnx`, the file is
    expected to contain a whole serialized ``nn.Module``, not just a
    ``state_dict``.

    :param model_filename: path to the pickled PyTorch model
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # NOTE(review): loading a full model unpickles arbitrary code — only use
    # with trusted checkpoints.
    model = torch.load(model_filename, map_location=device)

    batch_size = 1
    input_shape = (10, 80)  # per-sample input shape — adjust to your model

    # Inference mode was previously commented out; dropout / batch-norm must
    # be frozen so the exported graph is deterministic.
    model.eval()

    x = torch.randn(batch_size, *input_shape)  # dummy tracing tensor
    x = x.to(device)
    export_onnx_file = "{}.onnx".format(model_filename)  # output ONNX file name
    torch.onnx.export(model,
                      x,
                      export_onnx_file,
                      opset_version=10,
                      do_constant_folding=True,  # fold constant subgraphs
                      input_names=["input"],
                      output_names=["output"],
                      # keep the batch dimension dynamic in the exported graph
                      dynamic_axes={"input": {0: "batch_size"},
                                    "output": {0: "batch_size"}})

    print('finished')


def print_torch_model_info(torch_model_filename: str) -> None:
    """Print a summary of a checkpoint and dump input/output entries to a file.

    Entries whose name contains ``'input'`` or ``'output'`` are written, one
    per line as ``name: shape``, to ``exp/model_info.txt``.

    :param torch_model_filename: path to the checkpoint (a .pb file that only
        contains the model weights; the model structure is defined elsewhere)
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # map_location makes a GPU-saved checkpoint loadable on a CPU-only machine.
    model = torch.load(torch_model_filename, map_location=device)
    # NOTE(review): torchsummary.summary expects an nn.Module, yet the loop
    # below treats `model` as a dict of tensors — confirm what the .pb holds.
    torchsummary.summary(model, (1, 10, 80))
    out_dir = 'exp'
    # Fix: previously crashed with FileNotFoundError if 'exp/' did not exist.
    os.makedirs(out_dir, exist_ok=True)
    with codecs.open('{}/model_info.txt'.format(out_dir), 'w', 'utf-8') as f:
        for name, param in model.items():
            if 'input' in name or 'output' in name:
                f.write('{}: {}\n'.format(name, param.shape))


# main Function
if __name__ == '__main__':
    # Allow the checkpoint path to be passed as the first CLI argument;
    # fall back to the original hard-coded path for backward compatibility.
    default_model = "/Users/gaoxinglong/.cache/modelscope/hub/damo/speech_paraformer_asr-en-16k-vocab4199-pytorch/model.pb"
    torch_model = sys.argv[1] if len(sys.argv) > 1 else default_model
    print_torch_model_info(torch_model)
    convert_torch_model_to_onnx(torch_model)
