
import numpy as np
import torch,time,onnx
import torchvision
import onnxruntime
from models.MobileNet import mobilenet_v2
import netron

def to_numpy(tensor):
    """Return *tensor* as a numpy array on the CPU.

    Tensors that participate in autograd are detached first, since
    ``.numpy()`` refuses to operate on tensors that require grad.
    """
    detached = tensor.detach() if tensor.requires_grad else tensor
    return detached.cpu().numpy()


def pytorch_2_onnx(model,dummy_input,model_path,):
    # #导出模型
    torch.onnx.export(
        model,  # pytorch网络模型
        dummy_input,  # 随机的模拟输入
        model_path,  # 导出的onnx文件位置
        export_params=True,  # 导出训练好的模型参数
        verbose=10,  # 导出onnx的版本号
        training=torch.onnx.TrainingMode.EVAL,  # 导出模型调整到推理状态，将dropout，BatchNorm等涉及的超参数固定
        input_names=['input'],  # 为静态网络图中的输入节点设置别名，在进行onnx推理时，将input_names字段与输入数据绑定
        output_names=['output'],  # 为输出节点设置别名
        # 如果不设置dynamic_axes，那么对于输入形状为[4, 3, 224, 224]，在以后使用onnx进行推理时也必须输入[4, 3, 224, 224]
        # 下面设置了输入的第0维是动态的，以后推理时batch_size的大小可以是其他动态值
        dynamic_axes={
            # a dictionary to specify dynamic axes of input/output
            # each key must also be provided in input_names or output_names
            "input_data": {0: "batch_size"},
            "output_data": {0: "batch_size"}})




if __name__ == '__main__':

    # Use the GPU when available; fall back to CPU so the script also runs
    # on machines without CUDA (the original hard-coded .cuda()).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    dummy_input = torch.rand(1, 3, 224, 224).to(device)
    model_param = 'save_models/20220601_output/weights-20-0-0.9643231899265478.pth'
    model_path = model_param.replace('.pth', '.onnx')
    netron_ = False  # set True to open the netron model viewer at the end

    # Build the network (4 output classes) and load the trained weights.
    # map_location keeps the load working when the checkpoint was saved on
    # a different device than the current one.
    model = mobilenet_v2(4)
    model.load_state_dict(torch.load(model_param, map_location=device), strict=True)
    model = model.to(device)
    model.eval()
    # Reference forward pass used below to validate the ONNX output.
    out = model(dummy_input)

    pytorch_2_onnx(model, dummy_input, model_path)
    print('Exported model to onnx!')

    # Verify that the exported graph is a structurally valid ONNX model.
    onnx_net = onnx.load(model_path)
    onnx.checker.check_model(onnx_net)

    # Run the exported model with ONNX Runtime and check that the numeric
    # difference from the PyTorch output is within tolerance.
    ort_session = onnxruntime.InferenceSession(model_path)
    ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input)}
    ort_outs = ort_session.run(None, ort_inputs)
    np.testing.assert_allclose(to_numpy(out), ort_outs[0], rtol=1e-03, atol=1e-05)
    # BUG FIX: this success message was printed twice in the original.
    print("Exported model has been tested with ONNXRuntime, and the result looks good!")

    # Optionally visualise the exported network structure in the browser.
    if netron_:
        netron.start(model_path)