import onnxruntime
import numpy as np
from onnxruntime.datasets import get_example
import torch

import  sys
import os
sys.path.append(os.path.dirname(__file__) +'/../')
import cv2
import torch
import json
import tqdm
import argparse
import numpy as np
from mlsd_pytorch.cfg.default import get_cfg_defaults
from mlsd_pytorch.models.build_model import build_model
from mlsd_pytorch.data.utils import deccode_lines
from mlsd_pytorch.metric import msTPFP, AP
from albumentations import Normalize

def to_numpy(tensor):
    """Return *tensor* as a numpy array on the host, detaching it from the
    autograd graph first when it requires gradients."""
    if tensor.requires_grad:
        tensor = tensor.detach()
    return tensor.cpu().numpy()


def get_args():
    """Build and parse the command-line arguments for the consistency check.

    Returns:
        argparse.Namespace with the config/checkpoint/data paths and the
        line-decoding parameters (thresholds, top-k, input size).
    """
    args = argparse.ArgumentParser()
    # BUG FIX: was `current_dir = default=os.path.dirname(__file__)`, an
    # accidental chained assignment that also leaked a stray `default` name.
    current_dir = os.path.dirname(__file__)
    args.add_argument("--config", type=str,
                      default=current_dir + '/configs/mobilev2_mlsd_large_512_base2_bsize24.yaml')
    args.add_argument("--model_path", type=str,
                      default="/root/autodl-tmp/mlsd_pytorch/workdir/models/mobilev2_mlsd_large_512_bsize24/best.pth")
    args.add_argument("--gt_json", type=str,
                      default="/root/autodl-tmp/mlsd_pytorch/mlsd_pytorch/line/annotations/instances_val2017.json")
    args.add_argument("--img_dir", type=str,
                      default="/root/autodl-tmp/mlsd_pytorch/mlsd_pytorch/line/val2017")
    args.add_argument("--sap_thresh", type=float, help="sAP thresh", default=10.0)
    # top_k is a line count, so parse it as int (was mistakenly type=float).
    args.add_argument("--top_k", type=int, help="top k lines", default=300)
    args.add_argument("--min_len", type=float, help="min len of line", default=5.0)
    args.add_argument("--score_thresh", type=float, help="line score thresh", default=0.05)
    args.add_argument("--input_size", type=int, help="image input size", default=512)

    return args.parse_args()

# Parse CLI arguments and load the experiment configuration.
args = get_args()
cfg = get_cfg_defaults()
# Strip a stray Windows carriage return that can sneak into the path when
# the script is driven from a CRLF-edited shell script.
if args.config.endswith('\r'):
    args.config = args.config[:-1]
print('using config: ', args.config.strip())
cfg.merge_from_file(args.config)


# Build two identical models from the same checkpoint: one left on the CPU
# and one moved to the GPU, so their outputs can be compared below.
torch_model = build_model(cfg)
torch_model.load_state_dict(torch.load(args.model_path), strict=True)
torch_model.eval()

cuda_torch_model = build_model(cfg)
cuda_torch_model.load_state_dict(torch.load(args.model_path), strict=True)
cuda_torch_model = cuda_torch_model.cuda()
cuda_torch_model.eval()


# Get the torch models' outputs on a fixed random input so the CPU/GPU/ONNX
# results can all be compared against each other.
dummy_input = torch.randn(1, 3, 512, 512)

torch_model_input = dummy_input
cuda_torch_model_input = dummy_input.cuda()

with torch.no_grad():
    torch_out = torch_model(torch_model_input)
    cuda_torch_out = cuda_torch_model(cuda_torch_model_input)


# Get the ONNX model's outputs with both the CUDA and the CPU execution provider.
# NOTE(review): the file is named 'super_resolution.onnx' but is presumably the
# exported MLSD model — confirm against the export script.
onnx_model = get_example(r'/root/autodl-tmp/mlsd_pytorch/mlsd_pytorch/super_resolution.onnx')  # must be an absolute path
cuda_session = onnxruntime.InferenceSession(onnx_model, providers=['CUDAExecutionProvider'])
cpu_session = onnxruntime.InferenceSession(onnx_model, providers=['CPUExecutionProvider']) 

inname = [input.name for input in cpu_session.get_inputs()]
cuda_onnx_out = cuda_session.run(None, {inname[0]: to_numpy(dummy_input)})[0]
cpu_onnx_out = cpu_session.run(None, {inname[0]: to_numpy(dummy_input)})[0]


# Number of decimal places to which outputs must agree.
decimal_precision = 3


def _compare(label, expected, actual):
    """Print whether two output arrays agree to `decimal_precision` decimals.

    A mismatch is reported (with numpy's diff summary) rather than raised, so
    every pairwise comparison still runs.
    """
    print(label)
    try:
        np.testing.assert_almost_equal(expected, actual, decimal=decimal_precision)
        print(label + " equal")
    except AssertionError as e:
        print(label + " not equal")
        print(e)


# BUG FIX: the original copy-pasted try/except blocks printed the wrong labels
# in the "cpu torch and cpu onnx" case (success said "cpu onnx and cuda onnx",
# failure said "cuda onnx"). The helper guarantees consistent labels.
_compare("cpu torch and cuda torch", to_numpy(torch_out), to_numpy(cuda_torch_out))
_compare("cpu onnx and cuda onnx", cuda_onnx_out, cpu_onnx_out)
_compare("cpu torch and cpu onnx", to_numpy(torch_out), cpu_onnx_out)
_compare("cpu torch and cuda onnx", to_numpy(torch_out), cuda_onnx_out)
_compare("gpu torch and cpu onnx", to_numpy(cuda_torch_out), cpu_onnx_out)
_compare("gpu torch and gpu onnx", to_numpy(cuda_torch_out), cuda_onnx_out)