import torch
import torch.onnx
from third_party.efficientloftr.src.loftr import LoFTR, full_default_cfg, opt_default_cfg, reparameter
from copy import deepcopy


# 假设你有一个PyTorch模型，例如一个简单的线性层
class SimpleModel(torch.nn.Module):
    """Minimal example module: a single 2 -> 3 affine (linear) projection."""

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(in_features=2, out_features=3)

    def forward(self, x):
        # Project the last dimension of x from 2 features to 3.
        return self.linear(x)


# Select the model variant: 'full' for best quality, 'opt' for best efficiency.
model_type = 'full'
# Numerical precision: one of 'fp32', 'mp' (mixed precision), 'fp16' (best efficiency).
precision = 'fp16'

if model_type == 'full':
    _default_cfg = deepcopy(full_default_cfg)
elif model_type == 'opt':
    _default_cfg = deepcopy(opt_default_cfg)
else:
    # Fail early with a clear message instead of hitting a NameError on
    # _default_cfg further down the script.
    raise ValueError(f"unknown model_type: {model_type!r} (expected 'full' or 'opt')")

if precision == 'mp':
    _default_cfg['mp'] = True
elif precision == 'fp16':
    _default_cfg['half'] = True

# Instantiate the model and load the pretrained outdoor checkpoint.
model = LoFTR(config=_default_cfg)
model.load_state_dict(
    torch.load("/home/liyuke/PycharmProjects/EfficientLoFTR/weights/eloftr_outdoor.ckpt",
               map_location='cpu')['state_dict'])  # map_location so a CUDA-saved ckpt loads on CPU-only hosts
# Fold the training-time multi-branch (RepVGG-style) layers into single convs.
# NOTE(review): `reparameter` was imported at the top of the file but never
# called — EfficientLoFTR's deployment recipe requires it before inference/
# export; confirm against the upstream README.
model = reparameter(model)
if precision == 'fp16':
    # The config only sets the 'half' flag; the weights themselves must also
    # be converted to fp16 before tracing.
    model = model.half()
# Eval mode so the export traces inference behavior (no dropout / BN updates).
model.eval()

# Example inputs: two grayscale images, shape (batch=1, channels=1, 512, 640).
x1 = torch.randn(1, 1, 512, 640)
x2 = torch.randn(1, 1, 512, 640)
if precision == 'fp16':
    # Example inputs must match the model's parameter dtype for tracing.
    x1, x2 = x1.half(), x2.half()

input_names = ["image0", "image1"]  # names for the two flattened tensor inputs

# LoFTR.forward takes a single dict argument. torch.onnx.export treats a
# trailing dict in `args` as keyword arguments, so the data dict must be
# followed by an empty dict to be passed positionally. The original
# `{"data": {...}}` wrapper would not reach the model's forward correctly.
torch.onnx.export(
    model,
    ({"image0": x1, "image1": x2}, {}),
    '/home/liyuke/PycharmProjects/145/align/efficientloftr.onnx',
    export_params=True,          # embed the trained weights in the ONNX file
    opset_version=12,            # target ONNX opset
    do_constant_folding=True,    # fold constant subgraphs at export time
    input_names=input_names,
    output_names=['output'],
    # dynamic_axes keys must match input_names / output_names; the original
    # referenced 'output1'..'output3', which do not exist and were silently
    # ignored, leaving every axis static.
    dynamic_axes={
        'image0': {0: 'batch_size'},
        'image1': {0: 'batch_size'},
        'output': {0: 'batch_size'},
    },
)

print("模型已成功转换为ONNX格式。")
