from net import AlexNet
import torch.onnx

import onnxruntime
import numpy as np

import os
# Restrict this process to GPU 0. NOTE(review): this is set after `import torch`
# above — it still works because torch initializes its CUDA context lazily on
# first CUDA call, but it must run before any CUDA use — confirm no earlier init.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import onnx
# Log the installed onnx version and its default opset, useful when debugging
# export/opset mismatches.
print(onnx.__version__, " opset=", onnx.defs.onnx_opset_version())

def pth2onnx(weights_path='best_alexnet_model.pth', onnx_path='model.onnx'):
    """Export the trained AlexNet checkpoint to ONNX and validate the result.

    Args:
        weights_path: Path to the PyTorch state_dict checkpoint
            (default matches the original hard-coded path).
        onnx_path: Destination path for the exported ONNX model.
    """
    model = AlexNet(num_classes=10, in_channels=3)
    # map_location='cpu' so the export also works on CPU-only machines when
    # the checkpoint was saved from a CUDA device.
    model.load_state_dict(torch.load(weights_path, map_location='cpu'))
    # Export the inference-mode graph explicitly (disables dropout/batchnorm
    # training behavior) rather than relying on export defaults.
    model.eval()

    # AlexNet's input height/width are fixed at 227x227; only the batch
    # dimension can be dynamic.
    dummy_input = torch.randn(1, 3, 227, 227)
    dynamic_axes = {
        "input": {0: "batch_size"},  # dynamic batch size
        "output": {0: "batch_size"},
    }

    torch.onnx.export(model,
                      dummy_input,
                      onnx_path,
                      export_params=True,  # store trained weights in the file
                      opset_version=9,     # target ONNX opset
                      do_constant_folding=True,  # fold constants for optimization
                      input_names=['input'],
                      output_names=['output'],
                      dynamic_axes=dynamic_axes)

    # Re-load the exported file and run the structural checker; use a distinct
    # name so the ONNX proto does not shadow the PyTorch model.
    onnx_model = onnx.load(onnx_path)
    onnx.checker.check_model(onnx_model)
    print("ONNX 模型验证成功！")

def test_onnx_model(onnx_path='model.onnx', weights_path='best_alexnet_model.pth'):
    """Verify that the exported ONNX model's output matches the PyTorch model.

    Runs the same random input through ONNX Runtime and the original PyTorch
    model and asserts the outputs are numerically close.

    Args:
        onnx_path: Path to the exported ONNX model.
        weights_path: Path to the PyTorch state_dict checkpoint.

    Raises:
        AssertionError: if the two outputs differ beyond tolerance.
    """
    # Prefer CUDA but fall back to CPU so the check still runs on machines
    # without a GPU (the original CUDA-only list fails there).
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
    ort_session = onnxruntime.InferenceSession(onnx_path, providers=providers)

    # Fixed-size input; the exported model only allows a dynamic batch dim.
    dummy_input = torch.randn(1, 3, 227, 227).numpy()

    input_name = ort_session.get_inputs()[0].name
    output_name = ort_session.get_outputs()[0].name
    onnx_outputs = ort_session.run([output_name], {input_name: dummy_input})

    # Rebuild the reference PyTorch model; map_location='cpu' so loading works
    # regardless of the device the checkpoint was saved from.
    model = AlexNet(num_classes=10, in_channels=3)
    model.load_state_dict(torch.load(weights_path, map_location='cpu'))
    model.eval()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)

    with torch.no_grad():
        pytorch_outputs = model(torch.from_numpy(dummy_input).to(device)).cpu()
    pytorch_outputs = pytorch_outputs.numpy()

    # Loose absolute tolerance accounts for fp32 kernel differences between
    # ONNX Runtime and PyTorch.
    np.testing.assert_allclose(pytorch_outputs, onnx_outputs[0], rtol=1e-6, atol=1e-03)

    print("ONNX model output matches PyTorch model output")


if __name__ == "__main__":


    # pth2onnx()
    test_onnx_model()



