import matplotlib.pyplot as plt
import cv2
import kornia as K
import kornia.feature as KF
import numpy as np
import torch
from kornia_moons.feature import *
from kornia_moons.viz import draw_LAF_matches
import os


# Extract the first image from the batch and reorder its channels.
def preprocess_image(tensor_image):
    """Convert a batched tensor image to a uint8 numpy array.

    Takes the first image of the batch (index 0), drops the channel axis
    for single-channel images or moves it last ([H, W, C]) for
    multi-channel ones, then rescales floats in [0, 1] to uint8 [0, 255].
    """
    first = tensor_image[0]  # shape: [C, H, W]

    if first.shape[0] == 1:
        # Single channel: squeeze down to [H, W].
        first = first.squeeze(0)
    else:
        # Multi-channel: channels-last layout [H, W, C] for OpenCV.
        first = first.permute(1, 2, 0)

    # Floats in [0, 1] -> uint8 in [0, 255] (truncating cast).
    return (first.cpu().numpy() * 255).astype(np.uint8)


def convert_to_keypoints(kp_list):
    """Wrap an iterable of (x, y) coordinate pairs as cv2.KeyPoint objects of size 1."""
    keypoints = []
    for pt in kp_list:
        keypoints.append(cv2.KeyPoint(x=float(pt[0]), y=float(pt[1]), size=1))
    return keypoints


def match(ref, src, is_debug=False):
    """Match `src` against `ref` with LoFTR and warp `src` into `ref`'s frame.

    Args:
        ref: reference image as a torch tensor, shape [H, W, C], BGR,
             values 0-255 (inferred from the permute/255/bgr_to_rgb calls).
        src: source image, same layout as `ref`.
        is_debug: currently unused; kept for interface compatibility.

    Side effects:
        Writes "std_wrap.jpeg" (warped source) and "./corr.jpeg"
        (inlier-match visualization) to the working directory.

    Returns:
        The warped source image (numpy array, same size as the source).

    Raises:
        RuntimeError: if homography estimation fails (too few or
        degenerate confident matches).
    """
    # LoFTR pretrained on outdoor scenes; requires a CUDA device.
    matcher = KF.LoFTR(pretrained='outdoor').cuda()

    # [H, W, C] 0-255 -> [1, C, H, W] float in [0, 1], then BGR -> RGB.
    img1 = ref.permute(2, 0, 1).unsqueeze(0).float() / 255.
    img1_conv = K.color.bgr_to_rgb(img1)
    img2 = src.permute(2, 0, 1).unsqueeze(0).float() / 255.
    img2_conv = K.color.bgr_to_rgb(img2)

    # LoFTR operates on grayscale images only.
    input_dict = {"image0": K.color.rgb_to_grayscale(img1_conv),
                  "image1": K.color.rgb_to_grayscale(img2_conv)}

    if os.path.exists("data.pth"):
        # Debug shortcut: reuse cached correspondences instead of running the
        # model. NOTE(review): the cache is not keyed on the inputs, so stale
        # results are returned if ref/src change — confirm this is intentional.
        correspondences = torch.load("./data.pth", map_location=torch.device('cpu'))
    else:
        with torch.inference_mode():
            correspondences = matcher(input_dict)
            # torch.save(correspondences, "./data.pth")  # obj first, then path

    # Keep only confident correspondences, then estimate the homography
    # with RANSAC (1 px reprojection threshold) mapping src -> ref.
    mkpts0 = correspondences['keypoints0'].cpu().numpy()
    mkpts1 = correspondences['keypoints1'].cpu().numpy()
    confidence = correspondences["confidence"].cpu().numpy()
    keep = confidence > 0.7  # compute the mask once, reuse for both point sets
    mkpts0 = mkpts0[keep]
    mkpts1 = mkpts1[keep]

    Fm, inliers = cv2.findHomography(np.float32(mkpts1), np.float32(mkpts0),
                                     cv2.RANSAC, 1)
    # Fm, inliers = cv2.findFundamentalMat(mkpts1, mkpts0, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
    if Fm is None:
        # findHomography returns None on too few / degenerate matches;
        # fail loudly instead of crashing later in warpPerspective.
        raise RuntimeError("Homography estimation failed: not enough confident matches")

    inlier_mask = inliers.squeeze(-1) > 0
    good_kp1 = mkpts0[inlier_mask]  # inlier keypoints in the reference image
    good_kp2 = mkpts1[inlier_mask]  # corresponding inliers in the source image

    # Warp the source image into the reference frame and save it.
    img2_bgr = K.tensor_to_image(K.color.rgb_to_bgr(img2_conv)) * 255
    dst = cv2.warpPerspective(img2_bgr, Fm,
                              (int(img2_bgr.shape[1]), int(img2_bgr.shape[0])))
    cv2.imwrite("std_wrap.jpeg", dst)

    # One DMatch per surviving correspondence (identity index pairing).
    matches = [cv2.DMatch(i, i, 0) for i in range(len(good_kp1))]

    # Visualize the inlier matches side by side and save the debug image.
    img1_np = preprocess_image(img1)
    img2_np = preprocess_image(img2)
    good_kp1_cv = convert_to_keypoints(good_kp1)
    good_kp2_cv = convert_to_keypoints(good_kp2)
    dbg = cv2.drawMatches(img1_np, good_kp1_cv, img2_np, good_kp2_cv,
                          matches, None, flags=2)
    cv2.imwrite("./corr.jpeg", dbg)

    return dst


