import os

# NOTE(review): chdir to the parent directory at import time so the
# `third_party.efficientloftr` package below resolves. This is a global side
# effect — the script only works when launched from one specific directory,
# and it will break any other module that imports this file. Confirm before
# reusing; a sys.path insert would be safer.
os.chdir("..")
from copy import deepcopy

import torch
import cv2
import numpy as np
import matplotlib.cm as cm  # NOTE(review): cm and make_matching_figure are unused in this chunk
from third_party.efficientloftr.src.utils.plotting import make_matching_figure

from third_party.efficientloftr.src.loftr import LoFTR, full_default_cfg, opt_default_cfg, reparameter

# Model variant: 'full' maximizes matching quality, 'opt' maximizes throughput.
model_type = 'full'

# Numerical precision: 'fp32', 'mp' (mixed precision), or 'fp16'.
# fp16 gives the best efficiency on modern GPUs (NVIDIA SM_70 or newer).
precision = 'fp32'

# Start from a deep copy of the chosen preset so the shared module-level
# defaults stay untouched; thr/npe can still be tuned on the copy afterwards.
_preset_cfgs = {'full': full_default_cfg, 'opt': opt_default_cfg}
_default_cfg = deepcopy(_preset_cfgs[model_type])

# Map the precision choice onto the corresponding config flag, if any.
_precision_key = {'mp': 'mp', 'fp16': 'half'}.get(precision)
if _precision_key is not None:
    _default_cfg[_precision_key] = True

print(_default_cfg)
matcher = LoFTR(config=_default_cfg)

# Load the pretrained outdoor weights, then fold the training-time branches
# into inference form — skipping reparameterization degrades accuracy.
_ckpt = torch.load("/home/liyuke/PycharmProjects/EfficientLoFTR/weights/eloftr_outdoor.ckpt")
matcher.load_state_dict(_ckpt['state_dict'])
matcher = reparameter(matcher)

if precision == 'fp16':
    matcher = matcher.half()

matcher = matcher.eval().cuda()

# Open the two example video streams (presumably the same scene from two
# sensors — TODO confirm they share a resolution and are roughly synchronized).
cap1 = cv2.VideoCapture('/media/liyuke/share/145/321data/321data/DJI_0007.mov')
cap2 = cv2.VideoCapture('/media/liyuke/share/145/321data/321data/DJI_0008.mov')
while True:
    ret1, frame1 = cap1.read()
    ret2, frame2 = cap2.read()
    # Stop as soon as either stream ends. This check must come BEFORE any use
    # of the frames: cap.read() returns (False, None) at end-of-stream, and
    # the original code called cvtColor on None first, which raises.
    if not ret1 or not ret2:
        break
    frame1_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    frame2_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    # EfficientLoFTR requires input dimensions divisible by 32.
    # NOTE(review): both frames are resized to frame2's geometry so the warp
    # and concatenation below line up — confirm the two streams really share
    # a resolution, otherwise frame1 gets distorted here.
    target_size = (frame2.shape[1] // 32 * 32, frame2.shape[0] // 32 * 32)
    img0_raw = cv2.resize(frame1_gray, target_size)
    img1_raw = cv2.resize(frame2_gray, target_size)

    # Normalize to [0, 1] and add batch and channel dims: (1, 1, H, W).
    if precision == 'fp16':
        img0 = torch.from_numpy(img0_raw)[None][None].half().cuda() / 255.
        img1 = torch.from_numpy(img1_raw)[None][None].half().cuda() / 255.
    else:
        img0 = torch.from_numpy(img0_raw)[None][None].cuda() / 255.
        img1 = torch.from_numpy(img1_raw)[None][None].cuda() / 255.
    batch = {'image0': img0, 'image1': img1}

    # Run EfficientLoFTR; the matcher writes its results into `batch` in place.
    with torch.no_grad():
        if precision == 'mp':
            with torch.autocast(enabled=True, device_type='cuda'):
                matcher(batch)
        else:
            matcher(batch)
        mkpts0 = batch['mkpts0_f'].cpu().numpy()  # matched keypoints in img0
        mkpts1 = batch['mkpts1_f'].cpu().numpy()  # matched keypoints in img1
        mconf = batch['mconf'].cpu().numpy()      # per-match confidence

    # Estimate the homography mapping img0 onto img1 and visualize the warp.
    # findHomography needs at least 4 correspondences and can still return
    # None when RANSAC fails — skip the frame instead of crashing.
    if len(mkpts0) < 4:
        continue
    M, mask = cv2.findHomography(mkpts0, mkpts1, cv2.RANSAC, 5)
    if M is None:
        continue
    project_img = cv2.warpPerspective(img0_raw, M, dsize=(img1_raw.shape[1], img1_raw.shape[0]))
    concat_img = np.concatenate((img1_raw, project_img), axis=1)
    cv2.imshow('Matched Frames', concat_img)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
    # Collected per-frame outputs (unused downstream in this chunk).
    data = {'project_img': project_img, 'ir_img': frame2, 'Homography': M}

# Release capture handles and close the display window.
cap1.release()
cap2.release()
cv2.destroyAllWindows()
