from argparse import Namespace
import torch
import numpy as np
import cv2

from model.loftr_src.loftr.utils.cvpr_ds_config import default_cfg
from model.full_model import GeoFormer as GeoFormer_

from eval_tool.immatch.utils.data_io import load_gray_scale_tensor_cv
from model.geo_config import default_cfg as geoformer_cfg
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import time
class GeoFormer():
    """Inference wrapper around the GeoFormer matching model.

    Loads a checkpoint, runs coarse-to-fine matching on a grayscale image
    pair, and returns matched keypoints, optionally rescaled back to the
    original image resolution.
    """

    def __init__(self, imsize, match_threshold, no_match_upscale=False, ckpt=None, device='cuda'):
        """
        Args:
            imsize: target size passed to the image loader (images are
                resized so tensors are divisible by 8; see load_im).
            match_threshold: coarse-matching confidence threshold.
            no_match_upscale: if True, match_pairs returns matches in the
                resized coordinate frame plus the upscale factors.
            ckpt: path to the checkpoint file to load.
            device: torch device string, e.g. 'cuda' or 'cpu'.
        """
        self.device = device
        self.imsize = imsize
        self.match_threshold = match_threshold
        self.no_match_upscale = no_match_upscale

        # Inject the matching threshold into both the LoFTR-style config and
        # the GeoFormer-specific config before building the model.
        conf = dict(default_cfg)
        conf['match_coarse']['thr'] = self.match_threshold
        geoformer_cfg['coarse_thr'] = self.match_threshold
        self.model = GeoFormer_(conf)
        # Load on CPU first so a GPU-saved checkpoint also works on CPU-only hosts.
        ckpt_dict = torch.load(ckpt, map_location=torch.device('cpu'))
        if 'state_dict' in ckpt_dict:
            ckpt_dict = ckpt_dict['state_dict']
        self.model.load_state_dict(ckpt_dict, strict=False)
        self.model = self.model.eval().to(self.device)

        # Derive a human-readable method name from the checkpoint filename.
        self.ckpt_name = ckpt.split('/')[-1].split('.')[0]
        self.name = f'GeoFormer_{self.ckpt_name}'
        if self.no_match_upscale:
            self.name += '_noms'
        print(f'Initialize {self.name}')

    def change_device(self, device):
        """Move the model (and subsequent inputs) to *device*."""
        self.device = device
        self.model.to(device)

    # Backward-compatible alias: the method was originally misspelled
    # 'change_deivce'; keep the old name working for external callers.
    change_deivce = change_device

    def load_im(self, im_path, enhanced=False):
        """Load an image as a grayscale tensor; returns (tensor, scale)."""
        return load_gray_scale_tensor_cv(
            im_path, self.device, imsize=self.imsize, dfactor=8, enhanced=enhanced, value_to_scale=min
        )

    def match_inputs_(self, gray1, gray2, is_draw=False, output_dir=None):
        """Run the model on two preprocessed grayscale tensors.

        Returns:
            matches: (N, 4) array of [x1, y1, x2, y2] correspondences.
            kpts1, kpts2: (N, 2) keypoint arrays for each image.
            scores: (N,) matching confidences.
        """
        print(f'Input shape: {gray1.shape}, {gray2.shape}')
        batch = {'image0': gray1, 'image1': gray2}
        with torch.no_grad():
            batch = self.model(batch)
        kpts1 = batch['mkpts0_f'].cpu().numpy()
        kpts2 = batch['mkpts1_f'].cpu().numpy()

        def draw():
            # Visualize the matches with OpenCV and write the result to disk.
            import matplotlib.pyplot as plt
            import cv2
            import numpy as np
            plt.figure(dpi=200)
            kp0 = [cv2.KeyPoint(int(k[0]), int(k[1]), 30) for k in kpts1]
            kp1 = [cv2.KeyPoint(int(k[0]), int(k[1]), 30) for k in kpts2]
            matches = [cv2.DMatch(_trainIdx=i, _queryIdx=i, _distance=1, _imgIdx=-1)
                       for i in range(len(kp0))]
            show = cv2.drawMatches((gray1.cpu()[0][0].numpy() * 255).astype(np.uint8), kp0,
                                   (gray2.cpu()[0][0].numpy() * 255).astype(np.uint8), kp1, matches,
                                   None)
            print('len(kp0):', len(kp0))
            print(output_dir)
            if output_dir is None:
                # Hard-coded fallback path kept from the original debugging setup.
                cv2.imwrite('/data/ylm/code/pythonCode/GeoFormer/GeoFormer-main/image/show.jpg', show)
            else:
                cv2.imwrite(output_dir, show)

        if is_draw:
            draw()
        scores = batch['mconf'].cpu().numpy()
        matches = np.concatenate([kpts1, kpts2], axis=1)
        return matches, kpts1, kpts2, scores

    def match_pairs(self, im1_path, im2_path, cpu=False, is_draw=False, output_dir=None):
        """Match two images given their file paths.

        Args:
            im1_path, im2_path: paths of the two images to match.
            cpu: if True, temporarily run inference on CPU.
            is_draw: if True, write a match visualization image.
            output_dir: output file path for the visualization.

        Returns:
            (matches, kpts1, kpts2, scores), plus the upscale factors when
            no_match_upscale is set.
        """
        torch.cuda.empty_cache()
        tmp_device = self.device
        if cpu:
            self.change_device('cpu')
        gray1, sc1 = self.load_im(im1_path)
        gray2, sc2 = self.load_im(im2_path)

        # sc1 + sc2 concatenates the two per-image scale factors into the
        # 4-element upscale for the [x1, y1, x2, y2] match columns
        # (presumably each sc is an (x, y) pair — TODO confirm in data_io).
        upscale = np.array([sc1 + sc2])
        matches, kpts1, kpts2, scores = self.match_inputs_(gray1, gray2, is_draw, output_dir=output_dir)

        if self.no_match_upscale:
            # BUGFIX: restore the original device before the early return;
            # previously this path left the model on CPU when cpu=True.
            if cpu:
                self.change_device(tmp_device)
            return matches, kpts1, kpts2, scores, upscale.squeeze(0)

        # Rescale matches & keypoints back to original image coordinates.
        matches = upscale * matches
        kpts1 = sc1 * kpts1
        kpts2 = sc2 * kpts2

        if cpu:
            self.change_device(tmp_device)

        return matches, kpts1, kpts2, scores
    
# Build a checkerboard-mosaic comparison of two images.
def create_checkerboard(img1, img2, tile_size=50):
    """Blend two same-shaped images into a checkerboard mosaic.

    Even tiles (where tile-row + tile-col is even) come from img1, odd
    tiles from img2 — handy for visually inspecting alignment quality.

    Args:
        img1: first image (e.g. BGR), shown on the even tiles.
        img2: second image, same shape and dtype as img1, shown on the odd tiles.
        tile_size: side length of each checkerboard tile in pixels.

    Returns:
        Array with the same shape and dtype as the inputs.

    Raises:
        ValueError: if the two images do not have identical shapes
            (np.where would otherwise broadcast them silently and wrongly).
    """
    if img1.shape != img2.shape:
        raise ValueError(f'image shapes differ: {img1.shape} vs {img2.shape}')

    h, w = img1.shape[:2]
    # Vectorized parity mask: True on even tiles. Replaces the original
    # per-tile Python loop and the two cv2.bitwise_and passes.
    tile_rows = np.arange(h) // tile_size
    tile_cols = np.arange(w) // tile_size
    mask = (tile_rows[:, None] + tile_cols[None, :]) % 2 == 0
    if img1.ndim == 3:
        # Broadcast the 2-D mask across the channel axis for color images.
        mask = mask[:, :, None]
    return np.where(mask, img1, img2)

def main():
    """Match a fixed image pair with GeoFormer, estimate a RANSAC-refined
    homography, and write the match visualization, the warped image and a
    checkerboard comparison to disk.
    """
    name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    src_pt_path = '/data/ylm/code/pythonCode/GeoFormer/GeoFormer-main/image/wexin_20250329235509.jpg'  # source image
    dst_pt_path = '/data/ylm/code/pythonCode/GeoFormer/GeoFormer-main/image/weioxinbianji_20250330154852.jpg'  # target image
    output_dir = f'/data/ylm/code/pythonCode/GeoFormer/GeoFormer-main/image/out/{name}.jpg'
    output_dir_warped = f'/data/ylm/code/pythonCode/GeoFormer/GeoFormer-main/image/out/{name}-warped.jpg'
    output_dir_warped_masc = f'/data/ylm/code/pythonCode/GeoFormer/GeoFormer-main/image/out/{name}-masc.jpg'
    model_path = '/data/ylm/code/pythonCode/GeoFormer/GeoFormer-main/tmp_save/save_oxford_30/tmp300.ckpt'
    image_size = 640
    match_threshold = 0.6
    g = GeoFormer(image_size,
                  match_threshold,
                  no_match_upscale=False,
                  ckpt=model_path,
                  device='cuda')

    _, kpts1, kpts2, _ = g.match_pairs(src_pt_path,
                                       dst_pt_path,
                                       output_dir=output_dir,
                                       is_draw=True)

    img_src = cv2.imread(src_pt_path)
    img_dst = cv2.imread(dst_pt_path)

    # findHomography needs at least 4 correspondences.
    if len(kpts1) < 4:
        print(f'Not enough matches to estimate a homography ({len(kpts1)} < 4)')
        return

    src_pts = kpts1.reshape(-1, 1, 2)
    dst_pts = kpts2.reshape(-1, 1, 2)

    # Step 1: robust initial homography via RANSAC, yielding an inlier mask.
    M_ransac, mask_ransac = cv2.findHomography(
        src_pts, dst_pts,
        method=cv2.RANSAC,          # robust estimator
        ransacReprojThreshold=2.0   # reprojection error threshold (pixels)
    )
    # BUGFIX: findHomography may fail and return (None, None), which
    # previously crashed on mask_ransac.ravel().
    if M_ransac is None or mask_ransac is None:
        print('RANSAC homography estimation failed')
        return

    # Keep only the RANSAC inliers.
    inlier_src = src_pts[mask_ransac.ravel() == 1]
    inlier_dst = dst_pts[mask_ransac.ravel() == 1]
    print('len(inlier_src):', len(inlier_src))

    # Step 2: refine the homography on the inliers with plain least squares
    # (method=0 means no robust re-weighting).
    M_refined, _ = cv2.findHomography(
        inlier_src, inlier_dst,
        method=0
    )
    if M_refined is None:
        print('Homography refinement failed')
        return

    # Warp the target image into the source frame and save it.
    h, w = img_src.shape[:2]
    aligned_img = cv2.warpPerspective(img_dst, M_refined, (w, h))
    cv2.imwrite(output_dir_warped, aligned_img)

    # Checkerboard mosaic for visual alignment inspection.
    checkerboard = create_checkerboard(aligned_img, img_src, tile_size=20)
    cv2.imwrite(output_dir_warped_masc, checkerboard)
    
# Script entry point: run the demo matching pipeline when executed directly.
if __name__ == '__main__':
    main()
    

