import os
import torch
from utils.ConfigReader import ConfigReader
from network.model_v3 import mobilenet_v3_large

# --- Script-level setup -----------------------------------------------------
# Resolve the checkpoint/ONNX paths from config.yaml and build the model
# whose weights will be exported.
os.chdir('../')  # config.yaml lives in the parent directory
CR = ConfigReader('config.yaml')
pth_path = CR.getElement('pth_path')
onnx_path = CR.getElement('onnx_path')
input_size = CR.getElement('input_size')
# Validate with explicit raises instead of `assert`: assert statements are
# stripped when Python runs with -O, which would silently skip these checks.
if not pth_path.endswith('.pth'):
    raise ValueError("\033[1;31m Please give a model path ending with"
                     "'pth' \033[0m")
if not onnx_path.endswith('.onnx'):
    raise ValueError("\033[1;31m Please give a generated model path ending with"
                     "'onnx' \033[0m")
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
# NOTE(review): `input` shadows the builtin of the same name; the name is kept
# because the __main__ guard below passes it to pth_to_onnx.
input = torch.randn(1, 3, input_size, input_size)  # dummy NCHW tracing input
model = mobilenet_v3_large(num_classes=3, reduced_tail=True)


def pth_to_onnx(input, pth_path, onnx_path, input_names=['input'], output_names=['output']):
    """
    Convert a .pth model checkpoint to an ONNX model.

    :param input: random input tensor used to trace the network
    :param pth_path: path of the source .pth model
    :param onnx_path: path where the generated ONNX model is written
    :param input_names: names of the network's input nodes
    :param output_names: names of the network's output nodes
    """
    # map_location keeps the load working regardless of whether the
    # checkpoint was saved on a GPU or CPU machine.
    model.load_state_dict(torch.load(pth_path, map_location=device))  # initialize weights
    model.to(device)  # model and tracing input must live on the same device
    model.eval()      # inference mode: freeze dropout/batch-norm before export
    # BUG FIX: Tensor.to() is NOT in-place — the original code discarded its
    # return value, leaving `input` on the CPU. Rebind the returned tensor.
    input = input.to(device)
    # Trace the model with `input` and write the ONNX graph to onnx_path.
    torch.onnx.export(model, input, onnx_path, verbose=True,
                      input_names=input_names, output_names=output_names)

    print("\033[1;32mExporting .pth network to onnx network has been successful!\033[0m")


if __name__ == '__main__':
    # Export the model built above, using the config-derived paths and the
    # dummy tracing tensor. Keyword arguments make the call self-describing.
    pth_to_onnx(input=input, pth_path=pth_path, onnx_path=onnx_path)
