import torch
import torch.onnx
import onnx
import onnxruntime
import numpy as np

device = torch.device("cpu")


def to_numpy(tensor):
    """Convert a torch.Tensor to a NumPy array.

    Detaching first makes the conversion legal for tensors that require
    grad, and ``.cpu()`` generalizes the helper to tensors living on any
    device (a no-op for the CPU tensors this script creates). For a
    CPU tensor without grad this is behaviorally identical to the plain
    ``tensor.numpy()`` path.
    """
    return tensor.detach().cpu().numpy()


def main(pth_path, onnx_path):
    """Export a TorchScript model to ONNX and verify numerical parity.

    Loads the TorchScript model at ``pth_path``, exports it to
    ``onnx_path`` with a dynamic batch dimension, structurally validates
    the resulting ONNX graph, and checks that ONNX Runtime produces the
    same output as the original model on a random input.

    Args:
        pth_path: Path to a TorchScript (``torch.jit.save``) model file.
        onnx_path: Destination path for the exported ``.onnx`` file.

    Raises:
        TypeError: If ``onnx_path`` is not a string.
        AssertionError: If the ONNX Runtime output diverges from the
            PyTorch output beyond the tolerances below.
    """
    # Explicit raise instead of `assert`: asserts are stripped under -O.
    if not isinstance(onnx_path, str):
        raise TypeError("onnx_path must be a str, got %r" % type(onnx_path))

    # Load the TorchScript model and switch to inference mode.
    model = torch.jit.load(pth_path)
    model.eval()

    # Dummy input used both for tracing the export and for the parity
    # check. NOTE(review): the 1x3x100x200 shape presumably matches the
    # model's expected (N, C, H, W) input — confirm against the model.
    x = torch.rand(1, 3, 100, 200, requires_grad=True)
    print("x", type(x), x.shape)

    # Reference output from the original model.
    torch_out = model(x)

    torch.onnx.export(model,
                      x,
                      onnx_path,
                      export_params=True,        # embed trained weights
                      opset_version=11,
                      do_constant_folding=True,
                      input_names=["input"],
                      output_names=["output"],
                      # Only the batch dimension is dynamic.
                      dynamic_axes={"input": {0: "batch_size"},
                                    "output": {0: "batch_size"}})

    # Structural validation of the exported graph.
    onnx_model = onnx.load(onnx_path)
    onnx.checker.check_model(onnx_model)

    # onnxruntime >= 1.9 requires an explicit providers list; the file
    # targets CPU (see the module-level `device`).
    ort_session = onnxruntime.InferenceSession(
        onnx_path, providers=["CPUExecutionProvider"])

    # Numerical parity check: same input through ONNX Runtime must match
    # the PyTorch output within the given tolerances.
    ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
    ort_outs = ort_session.run(None, ort_inputs)
    np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
    print("Exported model has been tested with ONNXRuntime, and the result looks good!")


if __name__ == '__main__':
    # TorchScript input and ONNX output paths.
    # (Removed the unused `onnx_simple` path: nothing in this file
    # performs the simplification step it suggested.)
    pth_path = "./xue_xin_20250204_.pt"
    onnx_path = "./xue_xin_20250204_.onnx"

    main(pth_path, onnx_path)
