import cv2
import yaml
from plotting import *
import numpy as np
from model.superpoint import SuperPoint
# Read the video files
# cap1 = cv2.VideoCapture('/media/liyuke/小马好评/312/321data/321data/DJI_0007.mov')
# cap2 = cv2.VideoCapture('/media/liyuke/share/145/321data/321data/DJI_0008.mov')
# frame_width = int(cap1.get(cv2.CAP_PROP_FRAME_WIDTH))
# frame_height = int(cap1.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Check whether the videos opened successfully
# if not cap1.isOpened() or not cap2.isOpened():
#     print("Error: Couldn't open video files")
#     exit()
#
# Initialize the feature detector (SuperPoint, despite the original ORB comment)
# Load the SuperPoint/SuperGlue model arguments from the 'example' section
# of the YAML config.
config_file = '/home/liyuke/PycharmProjects/145/align/configs/superglue.yml'
with open(config_file, 'r') as f:
    # NOTE(review): FullLoader on a trusted local config file; prefer
    # yaml.safe_load if the config contains only plain scalars/maps.
    args = yaml.load(f, Loader=yaml.FullLoader)['example']

model = SuperPoint(args)
# matcher(im1, im2) -> (matches, ...) where matches is an (N, 4) array of
# matched keypoint coordinates (x1, y1, x2, y2).
matcher = model.match_pairs

# RANSAC reprojection threshold in pixels for homography estimation.
ransac_reproj_threshold = 5.0

# Input image pair (currently the same frame matched against itself).
frame1_path = '/media/liyuke/share/145/321trans/321trans/1/1-0/1000003.png'
frame2_path = '/media/liyuke/share/145/321trans/321trans/1/1-0/1000003.png'

# Detect keypoints and match the pair. `matches` is (N, 4):
# columns 0-1 are keypoints in frame1, columns 2-3 in frame2.
# The input is static, so compute this ONCE instead of re-running the
# model on every display refresh as the original loop did.
matches, _, _, _ = matcher(frame1_path, frame2_path)
kp1 = matches[:, :2]
kp2 = matches[:, 2:]

# Reject outlier matches with RANSAC while estimating the homography.
M, mask = cv2.findHomography(kp1, kp2, cv2.RANSAC, ransac_reproj_threshold)
if M is None:
    # findHomography returns (None, None) when estimation fails; the
    # original code would crash on mask.ravel().
    raise RuntimeError('Homography estimation failed: too few good matches')
inlier_flags = mask.ravel().tolist()
ransac_matches = [matches[i] for i, keep in enumerate(inlier_flags) if keep]
plot_matches(frame1_path, frame2_path, np.array(ransac_matches), radius=2, lines=True)

# Warp frame1 into frame2's view and show the two side by side.
frame1 = cv2.imread(frame1_path)
frame2 = cv2.imread(frame2_path)
project_img = cv2.warpPerspective(frame1, M, dsize=(frame2.shape[1], frame2.shape[0]))
concat_img = np.concatenate((frame2, project_img), axis=1)

# Refresh the display until 'q' is pressed.
while True:
    cv2.imshow('Matched Frames', concat_img)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

# Bundle the results (kept for downstream use, e.g. saving to disk).
data = {'project_img': project_img, 'ir_img': frame2, 'Homography': M}

# Release resources. The video captures (cap1/cap2) are commented out
# above and never created, so calling cap1.release()/cap2.release() here
# raised a NameError in the original; only the windows need closing.
cv2.destroyAllWindows()
