from model import Model
import numpy as np
import torch
import torch.onnx
import onnx
import onnxruntime

# Run export/verification on CPU; a CPU trace keeps the exported ONNX
# graph device-agnostic and avoids requiring a GPU at conversion time.
device = torch.device("cpu")


def to_numpy(tensor):
    """Convert a torch.Tensor to a NumPy array.

    ``detach()`` is a no-op for tensors that do not require grad, so the
    original ``requires_grad`` branch was redundant; ``cpu()`` additionally
    makes this safe for CUDA tensors, where calling ``.numpy()`` directly
    would raise a TypeError.
    """
    return tensor.detach().cpu().numpy()


def main():
    """Export the serialized PyTorch model at ``pth_path`` to ONNX at
    ``onnx_path``, then verify the exported graph with ONNXRuntime.

    Reads the module-level globals ``pth_path`` and ``onnx_path`` (set in
    the ``__main__`` block).  Raises if ``onnx_path`` is not a string, and
    propagates any mismatch found by ``np.testing.assert_allclose``.
    """
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if not isinstance(onnx_path, str):
        raise TypeError("lack of onnx_path parameter...")

    # torch.load on a whole-model checkpoint needs the Model class to be
    # importable — hence `from model import Model` at the top of the file.
    # NOTE(review): loading a full pickled model executes arbitrary code;
    # only use with trusted checkpoints.
    model = torch.load(pth_path, map_location=device)
    model.eval()

    # Dummy input in (N, C, H, W) layout used both to trace the export and
    # to compare outputs below.
    # NOTE(review): assumes the network expects 3x100x200 input — confirm.
    x = torch.rand(1, 3, 100, 200, requires_grad=True)
    print("x", type(x), x.shape)
    torch_out = model(x)

    torch.onnx.export(model,      # model being run
                      x,          # model input (or a tuple for multiple inputs)
                      onnx_path,  # where to save the model (can be a file or file-like object)
                      export_params=True,  # store the trained parameter weights inside the model file
                      opset_version=11,  # the ONNX version to export the model to
                      do_constant_folding=True,  # whether to execute constant folding for optimization
                      input_names=["input"],  # the model's input names
                      output_names=["output"],  # the model's output names
                      dynamic_axes={"input": {0: "batch_size"},  # variable length axes
                                    "output": {0: "batch_size"}})

    # Structural validation of the exported graph.
    onnx_model = onnx.load(onnx_path)
    onnx.checker.check_model(onnx_model)

    # Numerical validation: ONNXRuntime output must match the PyTorch output
    # for the same input within the given tolerances.
    ort_session = onnxruntime.InferenceSession(onnx_path)
    ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
    ort_outs = ort_session.run(None, ort_inputs)
    np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
    print("Exported model has been tested with ONNXRuntime, and the result looks good!")


if __name__ == '__main__':
    # These module-level globals are read by main(); they must be assigned
    # before the call.
    # NOTE(review): hard-coded Windows paths — consider argparse/CLI args.
    pth_path = "D:/CPP/LSTM_XUEXIN2/x64/Release/best_1.pth"
    onnx_path = "D:/CPP/LSTM_XUEXIN2/x64/Release/best_1.onnx"
    # Intended output path for a simplified graph; not used by main() here.
    onnx_simple = "D:/CPP/LSTM_XUEXIN2/x64/Release/best_1_simple.onnx"

    main()
    # Post-processing step to simplify the exported graph with onnx-simplifier:
    # python -m onnxsim xuexin_20230314_ok.onnx xuexin_20230314_ok_simple.onnx --input-shape 1,3,100,200