import cv2
import os
from method2.R_image_matching_main.risgmatching_lglue import RISLG,add_ones,calHomographyMat,trueH
from configuration import Configuration
from Modules.ImageProcess import MyCV2 as imp
import numpy as np
import yaml
import rasterio
from method2.R_image_matching_main.risgmatching_sg2 import RISGMatcher, add_ones,calHomographyMat
from process_image.tif_split import read_tif_and_split_into_blocks,raster2opencv
def resize_img(img, z_h, z_w):
    """Resize *img* to (z_h, z_w) pixels and return the scale factors used.

    Args:
        img: H x W x C image array.
        z_h: target height in pixels.
        z_w: target width in pixels.

    Returns:
        (resized_image, zoom_h, zoom_w) where zoom_h = z_h / H and
        zoom_w = z_w / W are the factors the coordinates were scaled by.
    """
    h, w = img.shape[:2]
    zoom_h = z_h / h
    zoom_w = z_w / w
    # BUG FIX: cv2.resize's third positional parameter is `dst`, not the
    # interpolation flag; pass the flag by keyword so INTER_AREA is used.
    img_z = cv2.resize(img, (z_w, z_h), interpolation=cv2.INTER_AREA)
    return img_z, zoom_h, zoom_w

def zoom_coords(pts, zoom_w, zoom_h):
    """Map point coordinates from a resized image back to the original
    image by undoing the given per-axis scale factors.

    Args:
        pts: (n, 2) array of [x, y] coordinates in the resized image.
        zoom_w: horizontal scale factor that was applied.
        zoom_h: vertical scale factor that was applied.

    Returns:
        (n, 2) array of [x, y] coordinates in the original image.
    """
    unscaled_x = pts[:, 0:1] / zoom_w
    unscaled_y = pts[:, 1:2] / zoom_h
    return np.hstack((unscaled_x, unscaled_y))

def get_ref_info(ref_name):
    """Parse a reference-tile filename ``<name>_<x1>_<y1>_<x2>_<y2>.<ext>``.

    Args:
        ref_name: path whose basename encodes the tile's pixel box.

    Returns:
        (name, x1, y1, x2, y2) — all five fields as strings.
    """
    stem = os.path.basename(ref_name)[:-4]
    name, rx1, ry1, rx2, ry2 = stem.split('_')
    return name, rx1, ry1, rx2, ry2

def get_query_info(query_name):
    """Parse a query filename with ten underscore-separated fields; the
    five fields after the name are ignored and the last four give the
    true-location pixel box.

    Args:
        query_name: path whose basename encodes the ground-truth box.

    Returns:
        (name, x1, y1, x2, y2) — fields as strings.
    """
    stem = os.path.basename(query_name)[:-4]
    (name, _f1, _f2, _f3, _f4, _f5,
     qx1, qy1, qx2, qy2) = stem.split('_')
    return name, qx1, qy1, qx2, qy2
def find_corners1(img):
    """Find the corner points of the content region inside *img*.

    Binarizes the image so that near-black background drops out, takes the
    largest external contour as the content, and returns a coarse polygonal
    approximation of that contour.

    Args:
        img: BGR image array.

    Returns:
        (n, 2) array of corner coordinates as [col, row] (for a rectangle:
        top-left, bottom-left, bottom-right, top-right), or None when no
        contour is found.
    """
    # Binarize: anything brighter than the near-black border is foreground.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 2, 253, cv2.THRESH_BINARY)

    # Only the outer outline of the content matters here.
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None

    # Assume the largest contour is the main content region.
    biggest = max(contours, key=cv2.contourArea)

    # A coarse polygon approximation of the outline yields the corners.
    perimeter = cv2.arcLength(biggest, True)
    polygon = cv2.approxPolyDP(biggest, 0.04 * perimeter, True)
    return polygon.reshape((-1, 2))
def find_jiaodian(corners):
    """Pick the top-left-most and bottom-right-most of the given points.

    The point with the smallest x+y sum is taken as top-left, the one with
    the largest sum as bottom-right.

    Args:
        corners: (n, 2) array of [x, y] points.

    Returns:
        (2, 2) array stacking [top_left, bottom_right].
    """
    coord_sums = corners[:, 0] + corners[:, 1]
    top_left = corners[np.argmin(coord_sums)]
    bottom_right = corners[np.argmax(coord_sums)]
    return np.vstack((top_left, bottom_right))

def cal_result(name, result):
    """Shift a box from reference-tile coordinates to full-map coordinates.

    The reference tile's top-left offset (cx1, cy1) is parsed from *name*
    and added to both box corners.

    Args:
        name: reference-tile filename encoding the tile's map offset.
        result: box squeezable to 4 values (x1, y1, x2, y2).

    Returns:
        (1, 4) array [x1, y1, x2, y2] in full-map pixel coordinates.
    """
    _, cx1, cy1, _, _ = get_ref_info(name)
    x1, y1, x2, y2 = np.squeeze(result)
    off_x = int(cx1)
    off_y = int(cy1)
    shifted = np.array([x1 + off_x, y1 + off_y, x2 + off_x, y2 + off_y])
    return shifted.reshape(1, 4)

def calculate_ious(gt_box, pred_boxes):
    """Compute the IoU between one ground-truth box and n predicted boxes.

    Args:
        gt_box: array squeezable to 4 values — the ground-truth box as
            (xmin, ymin, xmax, ymax) pixel coordinates.
        pred_boxes: (n, 4) array of predicted boxes, same corner order.

    Returns:
        Length-n array of IoU values; 0 wherever the union area is 0.
    """
    xmin_gt, ymin_gt, xmax_gt, ymax_gt = np.squeeze(gt_box)
    xmin_pred, ymin_pred, xmax_pred, ymax_pred = np.asarray(pred_boxes).T

    # Intersection rectangle, clamped to zero width/height when disjoint.
    inter_w = np.maximum(0, np.minimum(xmax_gt, xmax_pred) - np.maximum(xmin_gt, xmin_pred))
    inter_h = np.maximum(0, np.minimum(ymax_gt, ymax_pred) - np.maximum(ymin_gt, ymin_pred))
    inter_area = inter_w * inter_h

    area_gt = (xmax_gt - xmin_gt) * (ymax_gt - ymin_gt)
    area_pred = (xmax_pred - xmin_pred) * (ymax_pred - ymin_pred)
    union_area = area_gt + area_pred - inter_area

    # BUG FIX: the original divided first (emitting NaN / a divide-by-zero
    # RuntimeWarning for degenerate boxes) and masked afterwards; divide
    # only where the union is non-zero instead.
    iou = np.zeros_like(union_area, dtype=float)
    np.divide(inter_area, union_area, out=iou, where=union_area != 0)
    return iou
def cal_from_zoom(img2_z, H, zoom_w1, zoom_h1, path1):
    """Localize the query inside the reference using the homography fitted
    on resized images, then express the box in full-map coordinates.

    Args:
        img2_z: resized query image.
        H: homography mapping resized-query coords to resized-reference coords.
        zoom_w1: horizontal scale factor applied to the reference image.
        zoom_h1: vertical scale factor applied to the reference image.
        path1: reference-tile filename (encodes its map offset).

    Returns:
        (1, 4) array [x1, y1, x2, y2] in full-map pixel coordinates.
    """
    query_corners = find_corners1(img2_z)
    projected = np.dot(H, add_ones(query_corners).T).T[:, 0:2]
    box = find_jiaodian(projected)
    box_full_res = zoom_coords(box, zoom_w1, zoom_h1).reshape(1, 4)
    return cal_result(path1, box_full_res)
def cal_from_ori(img2, H, path1):
    """Localize the query inside the reference using a homography expressed
    in original-resolution coordinates, then shift the box to the full map.

    Args:
        img2: full-resolution query image.
        H: homography mapping query coords to reference coords.
        path1: reference-tile filename (encodes its map offset).

    Returns:
        (1, 4) array [x1, y1, x2, y2] in full-map pixel coordinates.
    """
    query_corners = find_corners1(img2)
    projected = np.dot(H, add_ones(query_corners).T).T[:, 0:2]
    box = find_jiaodian(projected).reshape(1, 4)
    return cal_result(path1, box)

def lg_match(config, img1_z, img2_z):
    """Match two images with the LightGlue-based matcher (RISLG).

    Args:
        config: configuration object providing ``nrotate``.
        img1_z: reference image.
        img2_z: query image.

    Returns:
        (mkpts1, mkpts2, H): matched keypoints in each image and the
        estimated homography.
    """
    matcher = RISLG()
    mkpts1, mkpts2, _conf, H = matcher.match(
        img1_z, img2_z, nrotate=config.nrotate, all=0)
    return mkpts1, mkpts2, H
def sg_match(config, img1_z, img2_z):
    """Match two images with the SuperGlue-based matcher (RISGMatcher).

    The matcher settings are reloaded from the YAML file named by
    ``config.sp_config_filename`` on every call.

    Args:
        config: configuration object providing ``sp_config_filename`` and
            ``nrotate``.
        img1_z: reference image.
        img2_z: query image.

    Returns:
        (mkpts1, mkpts2, H): matched keypoints in each image and the
        estimated homography.
    """
    with open(config.sp_config_filename, 'r') as handle:
        matcher = RISGMatcher(yaml.safe_load(handle))
    mkpts1, mkpts2, _conf, H = matcher.match(
        img1_z, img2_z, nrotate=config.nrotate, all=0)
    return mkpts1, mkpts2, H

def split_query(img):
    # NOTE(review): this function appears unfinished — it computes `corners`
    # and initializes `top_mid` but uses neither, and implicitly returns
    # None. Nothing in this file calls it; confirm whether it should be
    # completed (cf. img_split, which does a similar split) or removed.
    corners = find_corners1(img)
    top_mid = []
def cut_img(img, corners):
    """Crop *img* to the axis-aligned bounding box of *corners*, clipped to
    the image bounds.

    Args:
        img: H x W x C image array.
        corners: (n, 2) array of [x, y] points.

    Returns:
        (crop, rect): the cropped image and a (4, 2) int array of the
        rectangle's corners in the original image, ordered top-left,
        bottom-left, bottom-right, top-right as [x, y].
    """
    height, width, _ = img.shape
    xs = corners[:, 0]
    ys = corners[:, 1]
    # Clip the bounding box to the image extent before cropping.
    left = int(max(0, xs.min()))
    right = int(min(xs.max(), width))
    top = int(max(ys.min(), 0))
    bottom = int(min(ys.max(), height))
    rect = np.array([[left, top], [left, bottom], [right, bottom], [right, top]])
    return img[top:bottom, left:right], rect
def img_split(img, num=4):
    """Split the content region of *img* into four quadrant crops.

    The content quadrilateral (via ``find_corners1``) is subdivided through
    its edge midpoints and center, producing one crop per corner.

    Args:
        img: H x W x C image array.
        num: nominal number of sub-blocks; currently unused — the split is
            always four quadrants. Kept for interface compatibility.

    Returns:
        (crops, rects): list of cropped images and list of (4, 2) corner
        arrays giving each crop's rectangle in *img* (see ``cut_img``).
    """
    # BUG FIX: an OpenCV image is (height, width, channels); the original
    # `_, h, w = img.shape` mis-assigned the fields. The values are unused,
    # but the unpack still validates that the image is 3-dimensional.
    h, w, _ = img.shape
    corners = find_corners1(img)
    # NOTE(review): the indexing below assumes exactly four corners in
    # top-left, bottom-left, bottom-right, top-right order — confirm that
    # find_corners1 always yields a quadrilateral here.
    top_left = corners[0:1]
    left_lower = corners[1:2]
    lower_right = corners[2:3]
    top_right = corners[3:4]
    # Midpoints of the four edges and the center of the quadrilateral.
    mid_top_left = (top_left + left_lower) / 2
    mid_left_lower = (left_lower + lower_right) / 2
    mid_lower_right = (lower_right + top_right) / 2
    mid_top_right = (top_right + top_left) / 2
    mid = (mid_top_left + mid_lower_right) / 2
    # Closed ring of edge midpoints so consecutive pairs can be indexed.
    mid_corners = np.concatenate(
        [mid_top_right, mid_top_left, mid_left_lower, mid_lower_right, mid_top_right],
        axis=0)
    img_result = []  # crops in corner order: top-left, bottom-left, bottom-right, top-right
    coords = []
    for i in range(len(corners)):
        # Quadrilateral spanned by the center, one corner and its two
        # adjacent edge midpoints.
        cut_corners = np.concatenate(
            [mid, corners[i:i + 1], mid_corners[i:i + 1], mid_corners[i + 1:i + 2]],
            axis=0)
        img_cut, coord = cut_img(img, cut_corners)
        img_result.append(img_cut)
        coords.append(coord)
    return img_result, coords

def block_match(img1, img2, H21, config, way='lg'):
    """Refine the match between *img1* and *img2* by re-matching each
    quadrant of *img2* against the corresponding region of *img1*.

    *img2* is split into four blocks; each block's rectangle is projected
    into *img1* via the coarse homography *H21*, the matching crop of
    *img1* is re-matched against the block, and the per-block keypoints
    (shifted back to whole-image coordinates) are pooled to re-estimate
    the homography.

    Args:
        img1: reference image.
        img2: query image.
        H21: coarse homography mapping img2 coords to img1 coords.
        config: configuration object passed through to the matcher.
        way: matcher to use — 'lg' (LightGlue) or 'sg' (SuperGlue).

    Returns:
        (mkpts1, mkpts2, H1): pooled inlier keypoints and the refined
        homography.

    Raises:
        ValueError: if *way* is not a recognized matcher name.
    """
    img2_splited, coord2_splited = img_split(img2)
    all_mkpts1 = np.array([]).reshape(-1, 2)
    all_mkpts2 = np.array([]).reshape(-1, 2)

    for i in range(len(img2_splited)):
        # Project this block's rectangle into img1 and crop that region.
        coord1 = np.dot(H21, add_ones(coord2_splited[i]).T).T[:, 0:2]
        img1_cut, coord1_cut = cut_img(img1, coord1)
        if way == 'lg':
            mkpts1, mkpts2, _ = lg_match(config, img1_cut, img2_splited[i])
        elif way == 'sg':
            # BUG FIX: any way other than 'lg' previously fell through and
            # raised NameError on the undefined mkpts1 below.
            mkpts1, mkpts2, _ = sg_match(config, img1_cut, img2_splited[i])
        else:
            raise ValueError(f"unknown matcher way: {way!r}")
        # Shift keypoints from crop-local to whole-image coordinates.
        mkpts1 = np.concatenate(
            [mkpts1[:, 0:1] + coord1_cut[0, 0], mkpts1[:, 1:2] + coord1_cut[0, 1]],
            axis=1)
        mkpts2 = np.concatenate(
            [mkpts2[:, 0:1] + coord2_splited[i][0, 0],
             mkpts2[:, 1:2] + coord2_splited[i][0, 1]],
            axis=1)
        all_mkpts1 = np.concatenate([all_mkpts1, mkpts1], axis=0)
        all_mkpts2 = np.concatenate([all_mkpts2, mkpts2], axis=0)

    # Re-estimate the homography from the pooled correspondences.
    mkpts2, mkpts1, H1 = calHomographyMat(all_mkpts2, all_mkpts1)
    return mkpts1, mkpts2, H1

def zoom_cal_iou(result_zoom, path2):
    """IoU between a predicted box and the ground-truth box encoded in the
    query filename.

    Args:
        result_zoom: (1, 4) predicted box [x1, y1, x2, y2].
        path2: query filename whose last four fields are the true box.

    Returns:
        IoU of the prediction against the ground-truth box.
    """
    _, tx1, ty1, tx2, ty2 = get_query_info(path2)
    gt_box = np.array([int(tx1), int(ty1), int(tx2), int(ty2)]).reshape(1, 4)
    return calculate_ious(gt_box, result_zoom)
def print_iou(path2, result_zoom, result_zoomb, result_ori, result_orib):
    """Print the IoU of each of the four localization variants against the
    ground-truth box encoded in the query filename.

    Args:
        path2: query filename whose last four fields are the true box.
        result_zoom: box from the plain match on resized images.
        result_zoomb: box from the block match on resized images.
        result_ori: box from the plain match at original resolution.
        result_orib: box from the block match at original resolution.
    """
    _, tx1, ty1, tx2, ty2 = get_query_info(path2)
    gt_box = np.array([int(tx1), int(ty1), int(tx2), int(ty2)]).reshape(1, 4)
    labeled_boxes = (('iou_zoom', result_zoom),
                     ('iou_zoomb', result_zoomb),
                     ('iou_ori', result_ori),
                     ('iou_orib', result_orib))
    for label, box in labeled_boxes:
        print(f'{label}:{calculate_ious(gt_box, box)}')

def cal_ori_H(mkpts1, mkpts2, zoom_w1, zoom_h1, zoom_w2, zoom_h2):
    """Re-estimate the homography in original-resolution coordinates.

    The matched keypoints (found on resized images) are mapped back to the
    original resolution of each image before fitting.

    Args:
        mkpts1: keypoints in the resized reference image.
        mkpts2: keypoints in the resized query image.
        zoom_w1, zoom_h1: scale factors applied to the reference image.
        zoom_w2, zoom_h2: scale factors applied to the query image.

    Returns:
        Homography mapping original query coords to original reference coords.
    """
    pts1_full = zoom_coords(mkpts1, zoom_w1, zoom_h1)
    pts2_full = zoom_coords(mkpts2, zoom_w2, zoom_h2)
    _, _, H_full = calHomographyMat(pts2_full, pts1_full, p1=3)
    return H_full
def match(path1, path2):
    """Localize the query image *path2* inside the reference tile *path1*
    and print the IoU of four localization variants against ground truth.

    Args:
        path1: reference-tile filename (encodes its map offset).
        path2: query filename (encodes the true-location box).
    """
    img1 = imp.cv_imread(path1)[:, :, 0:3]
    img2 = imp.cv_imread(path2)[:, :, 0:3]

    z_h, z_w = 1000, 1000
    img1_z, zoom_h1, zoom_w1 = resize_img(img1, z_h, z_w)
    if min(img2.shape[0:2]) > max(z_w, z_h):
        img2_z, zoom_h2, zoom_w2 = resize_img(img2, z_h, z_w)
    else:
        # BUG FIX: the original left img2_z / zoom_h2 / zoom_w2 undefined
        # for small query images, raising NameError below. Small queries
        # are matched at native resolution (identity zoom).
        img2_z, zoom_h2, zoom_w2 = img2, 1.0, 1.0

    config = Configuration()
    mkpts1, mkpts2, H = lg_match(config, img1_z, img2_z)
    mkptsb1, mkptsb2, Hb = block_match(img1_z, img2_z, H, config, way='lg')

    # Homographies re-fitted in original-resolution coordinates.
    H1 = cal_ori_H(mkpts1, mkpts2, zoom_w1, zoom_h1, zoom_w2, zoom_h2)
    Hb1 = cal_ori_H(mkptsb1, mkptsb2, zoom_w1, zoom_h1, zoom_w2, zoom_h2)

    result_zoom = cal_from_zoom(img2_z, H, zoom_w1, zoom_h1, path1)
    result_zoomb = cal_from_zoom(img2_z, Hb, zoom_w1, zoom_h1, path1)
    result_ori = cal_from_ori(img2, H1, path1)
    result_orib = cal_from_ori(img2, Hb1, path1)
    print_iou(path2, result_zoom, result_zoomb, result_ori, result_orib)



if __name__ == "__main__":
    path1 = 'D:/python_program/2024/sat-geo-loc-finals/method2/H49D001012_2100_7900_13800_18600.jpg'
    path2 = 'E:/satLocate/fusai/query/crop/raster/H49D001012_16_2060_7965_11060_17965_2550_8152_13299_18304.tif'
    # path1 = 'D:/python_program/2024/sat-geo-loc-finals/method2/cropH49D001012_2100_7900_13800_18600.jpg'
    # path2 = 'E:/satLocate/fusai/query/crop/raster/H49D001012_16_6094_10357_11018_15281_7368_10579_13249_15578.tif'
    match(path1,path2)