import os
import onnxruntime
import torch
from third_party.efficientloftr.src.loftr import LoFTR, full_default_cfg, opt_default_cfg
from third_party.superglue.models.superpoint import SuperPoint
from copy import deepcopy
import yaml

def get_args(argv=None):
    """Parse command-line arguments for the weight-conversion script.

    Args:
        argv: Optional list of argument strings; ``None`` (the default)
            parses ``sys.argv`` as before, so existing callers are unaffected.

    Returns:
        argparse.Namespace with a single ``model_path`` attribute.
    """
    import argparse

    parser = argparse.ArgumentParser("convert loftr torch weights to onnx format")
    # Raw string: '\w' and '\o' are invalid escape sequences and emit a
    # SyntaxWarning on Python 3.12+; the literal value is unchanged.
    parser.add_argument("--model_path", type=str, default=r'.\weights\outdoor_ds.ckpt')

    return parser.parse_args(argv)


def main():
    """Export a SuperPoint model to ONNX and smoke-test it with onnxruntime.

    Reads the SuperPoint configuration from a (currently hard-coded) YAML
    file, traces the model with a dummy 1x1x512x512 input, writes
    ``superpoint.onnx`` to the current directory, then loads it back with
    onnxruntime and prints the graph's inputs and outputs.
    """
    model_type = 'full'  # 'full' for best quality, 'opt' for best efficiency
    # Numerical precision in ['fp32', 'mp', 'fp16']. 'fp16' for best efficiency.
    precision = 'fp16'

    # NOTE(review): this LoFTR config is built but never used below —
    # presumably leftover from the EfficientLoFTR export script; confirm
    # before deleting entirely.
    if model_type == 'full':
        _default_cfg = deepcopy(full_default_cfg)
    elif model_type == 'opt':
        _default_cfg = deepcopy(opt_default_cfg)

    if precision == 'mp':
        _default_cfg['mp'] = True
    elif precision == 'fp16':
        _default_cfg['half'] = True

    # NOTE(review): hard-coded absolute path — consider passing this in via
    # get_args() so the script runs outside the original author's machine.
    config_file = '/home/liyuke/PycharmProjects/145/align/configs/superglue.yml'
    with open(config_file, 'r') as f:
        # FullLoader can construct arbitrary Python objects; fine for a
        # trusted local config, do not point this at untrusted input.
        args = yaml.load(f, Loader=yaml.FullLoader)['example']

    model = SuperPoint(args)
    model.eval()  # inference mode: disables dropout/batchnorm updates for a stable trace

    batch_size = 1
    height = 512
    width = 512

    # Dummy grayscale input used only to trace the graph.
    image = torch.randn(batch_size, 1, height, width)
    input_names = ["image"]
    output_names = ["keypoints", "scores", "descriptors"]
    torch.onnx.export(
        model,
        (image,),
        "superpoint.onnx",
        export_params=True,
        opset_version=16,
        do_constant_folding=True,
        input_names=input_names,
        output_names=output_names,
    )

    print(f"\nonnx model is saved to: {os.getcwd()}/superpoint.onnx")

    print("\ntest inference using onnxruntime")

    # Prefer the CUDA provider when a GPU is visible, otherwise fall back to CPU.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if device == 'cuda':
        onnx_providers = ['CUDAExecutionProvider']
    else:
        onnx_providers = ['CPUExecutionProvider']
    sess = onnxruntime.InferenceSession("superpoint.onnx", providers=onnx_providers)
    for sess_input in sess.get_inputs():
        print("input: ", sess_input)

    print("\n")
    for sess_output in sess.get_outputs():
        print("output: ", sess_output)


if __name__ == "__main__":
    import warnings

    # Suppress noisy framework warnings emitted during ONNX export.
    warnings.filterwarnings("ignore")
    main()