import cv2
import numpy as np
from configuration import Configuration
from retrieval_modify import image_retrieval
# from R_image_matching_main.risgmatching_sift22 import RSIFTMatcher,add_ones
from R_image_matching_main.risgmatching_sg2 import RISGMatcher, add_ones
# from R_image_matching_main.risgmatching_loftr import RLoftrMatcher,add_ones
import yaml
from Models.FolderProcess import MyOS as fdp
# 程序中链接了多个 OpenMP 运行时库的副本
import os
from tqdm import tqdm
import sys

# from save1.risgmatching_sift import RSIFTMatcher,add_ones
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


def calculate_ious(gt_box, pred_boxes):
    """Compute the IoU between one ground-truth box and n predicted boxes.

    Parameters
    ----------
    gt_box : numpy array of shape (1, 4)
        Ground-truth box as (xmin, ymin, xmax, ymax) pixel coordinates.
    pred_boxes : numpy array of shape (n, 4)
        Predicted boxes in the same (xmin, ymin, xmax, ymax) layout.

    Returns
    -------
    numpy array of length n with the IoU of each predicted box against
    the ground-truth box; entries where the union area is 0 are 0.
    """
    # Unpack coordinates (columns of pred_boxes become four length-n vectors).
    xmin_gt, ymin_gt, xmax_gt, ymax_gt = gt_box.squeeze()
    xmin_pred, ymin_pred, xmax_pred, ymax_pred = pred_boxes.T

    # Intersection rectangle; width/height are clamped at 0 so disjoint
    # boxes contribute zero area instead of a spurious positive product.
    inter_xmin = np.maximum(xmin_gt, xmin_pred)
    inter_ymin = np.maximum(ymin_gt, ymin_pred)
    inter_xmax = np.minimum(xmax_gt, xmax_pred)
    inter_ymax = np.minimum(ymax_gt, ymax_pred)
    inter_area = (np.maximum(0, inter_xmax - inter_xmin)
                  * np.maximum(0, inter_ymax - inter_ymin))

    # Individual areas and the inclusion-exclusion union.
    area_gt = (xmax_gt - xmin_gt) * (ymax_gt - ymin_gt)
    area_pred = (xmax_pred - xmin_pred) * (ymax_pred - ymin_pred)
    union_area = area_gt + area_pred - inter_area

    # Fix: the original divided first and patched zeros afterwards with
    # np.where, which still raised a divide-by-zero RuntimeWarning and
    # produced a transient NaN/inf.  Divide only where union_area != 0;
    # everything else keeps the pre-filled 0.
    iou = np.divide(inter_area, union_area,
                    out=np.zeros_like(union_area, dtype=float),
                    where=union_area != 0)
    return iou


def find_corners1(image_path):
    """Locate the polygon corners of the main (non-black) content of an image.

    :param image_path: path to the image file
    :return: (k, 2) array of corner coordinates in (col, row) order, or
        None if the image cannot be read or no contour is found.
    """
    img = cv2.imread(image_path)
    if img is None:
        print("Error: Image not found.")
        return

    # Binarize: any pixel brighter than 10 counts as content.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    # Outer contours only; the largest one is assumed to be the content.
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return

    biggest = max(contours, key=cv2.contourArea)

    # Approximate the contour with a coarse polygon (4% of the perimeter
    # as tolerance); its vertices serve as the corner points.
    tolerance = 0.04 * cv2.arcLength(biggest, True)
    polygon = cv2.approxPolyDP(biggest, tolerance, True)
    return polygon.reshape((-1, 2))


def find_corners(image_path):
    """Find the (top-left, bottom-right) corner pair of an image's content.

    :param image_path: path to the image file
    :return: (2, 2) array of (col, row) points — row 0 the top-left,
        row 1 the bottom-right — or None if the image cannot be read
        or no contour is found.
    """
    img = cv2.imread(image_path)
    if img is None:
        print("Error: Image not found.")
        return

    # Binarize: any pixel brighter than 10 counts as content.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    # Outer contours only; the largest one is assumed to be the content.
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return

    biggest = max(contours, key=cv2.contourArea)

    # Approximate the contour with a coarse polygon (4% of the perimeter
    # as tolerance) and work with its vertices as (col, row) points.
    tolerance = 0.04 * cv2.arcLength(biggest, True)
    points = cv2.approxPolyDP(biggest, tolerance, True).reshape((-1, 2))

    # Leftmost column; among the points there, the smallest row -> top-left.
    col_min = np.min(points[:, 0])
    rows_at_min = points[np.where(points[:, 0] == col_min)[0], 1]
    top_left = np.array([col_min, np.min(rows_at_min)]).reshape(1, 2)

    # Rightmost column; among the points there, the largest row -> bottom-right.
    col_max = np.max(points[:, 0])
    rows_at_max = points[np.where(points[:, 0] == col_max)[0], 1]
    bottom_right = np.array([col_max, np.max(rows_at_max)]).reshape(-1, 2)

    return np.concatenate([top_left, bottom_right], axis=0)


def find_jiaodian(corners):
    """Pick the two extreme corners of a point set.

    The point with the smallest col+row sum is taken as the top-left
    corner and the point with the largest sum as the bottom-right.

    :param corners: (n, 2) array of (col, row) points
    :return: (1, 4) array [col_tl, row_tl, col_br, row_br]
    """
    sums = corners.sum(axis=1)
    top_left = corners[np.argmin(sums)]
    bottom_right = corners[np.argmax(sums)]
    return np.concatenate([top_left, bottom_right]).reshape(1, 4)


def cal_result(name, result):
    """Shift a box from crop-local coordinates into reference-image
    coordinates using the offsets encoded in the crop's filename.

    The filename stem (extension stripped as the last 4 characters) has
    the form ``ref_cy1_cx1_cy2_cx2``; (cx1, cy1) is the crop's top-left
    corner inside the reference image.

    :param name: crop file name or path carrying the offsets
    :param result: array-like with 4 values (x1, y1, x2, y2) local to the crop
    :return: (1, 4) array with the shifted box (x1, y1, x2, y2)
    """
    stem = os.path.basename(name)[:-4]
    _, off_y, off_x, _, _ = stem.split('_')
    x1, y1, x2, y2 = np.squeeze(result)
    dx = int(off_x)
    dy = int(off_y)
    return np.array([x1 + dx, y1 + dy, x2 + dx, y2 + dy]).reshape(1, 4)


def record(query, iou, path):
    """Write per-query IoU scores plus their mean to ``<path>result.txt``.

    :param query: sequence of query names
    :param iou: sequence of IoU values, same length as ``query``
    :param path: output prefix; joined to the file name by plain string
        concatenation, so include a trailing separator.
    """
    # One (name, iou) row per query.
    result = np.concatenate(
        [np.array(query).reshape(-1, 1), np.array(iou).reshape(-1, 1)], axis=1)
    # Fix: use a context manager so the handle is closed even if print
    # raises; the original's explicit close leaked on error, and the
    # 'w+' read capability was never used.
    with open(path + 'result.txt', 'w') as fp:
        print(result, '\n', f'mean:{np.mean(np.array(iou))}', file=fp)


def run(config):
    """Match every retrieved query/crop pair and append the located box
    for each query image to ``<output_path>/RESULT.txt``.

    Pipeline:
      1. ``image_retrieval`` pairs each query image with a cropped
         reference image.
      2. ``RISGMatcher`` matches query against crop and returns an
         estimated transform ``Mat``.
      3. The query's outline corners are mapped through ``Mat`` and
         shifted into reference-image coordinates using the offsets
         encoded in the crop's filename, then written by print_results.

    :param config: Configuration object; this function reads nrotate,
        al_crop_folder, input_path, sp_config_filename and output_path.
    """
    nrotate = config.nrotate
    crop_folder = config.al_crop_folder
    query_folder = config.input_path + '/test_image'
    # corr: (n, 2) array pairing query names (col 0) with crop names (col 1).
    corr = image_retrieval(config)

    query_list = corr[:, 0].tolist()
    crop_list = corr[:, 1].tolist()
    # Build the matcher from the SuperPoint/matcher YAML config file.
    config_filename = config.sp_config_filename
    with open(config_filename, 'r') as f:
        sp_config = yaml.safe_load(f)
        risg = RISGMatcher(sp_config)
    # Alternative matchers (LoFTR / SIFT) kept for reference:
    # rilf = RLoftrMatcher()
    # risf = RSIFTMatcher()

    for i in tqdm(range(0, len(query_list)), total=len(query_list)):
        path0 = f'{crop_folder}/{crop_list[i]}'
        path1 = os.path.join(query_folder, query_list[i])
        img0 = cv2.imread(path0)
        img1 = cv2.imread(path1)
        if (img0 is None) or (img1 is None):
            print('Error: Image file not found.')
            continue
        mkpts0, mkpts1, conf, main_dir, Mat = risg.match(img0, img1, nrotate=nrotate)
        # The crop filename encodes its placement in the reference image:
        # ref_cy1_cx1_cy2_cx2 (extension stripped as the last 4 chars).
        ref_name, cy1, cx1, cy2, cx2 = os.path.basename(crop_list[i])[:-4].split('_')
        if len(Mat) == 1:
            # NOTE(review): presumably len(Mat) == 1 signals a failed
            # match — the crop's own location is used as the fallback
            # result.  Confirm against RISGMatcher.match.
            ref_name, cy1, cx1, cy2, cx2 = os.path.basename(crop_list[i])[:-4].split('_')
            result = np.array([int(cx1), int(cy1), int(cx2), int(cy2)]).reshape(1, 4)
        else:
            # Map the query's content outline through Mat into crop
            # coordinates, reduce it to a (top-left, bottom-right) box,
            # then shift by the crop offsets into reference coordinates.
            corners = find_corners1(path1)
            ori_corners = np.dot(Mat, add_ones(corners).T).T[:, 0:2]
            mid_result = find_jiaodian(ori_corners)
            result = cal_result(crop_list[i], mid_result)
        print_results(os.path.basename(path1), ref_name + '.TIF', result, config.output_path)


def print_results(query_name, base_name, result, output_path):
    """Append one localisation result line to ``<output_path>/RESULT.txt``.

    Line format: ``query base x1 y1 x2 y2`` (space separated).

    :param query_name: query image file name
    :param base_name: reference image file name
    :param result: (1, 4) array holding the located box
    :param output_path: directory receiving RESULT.txt (created if missing)
    """
    fdp.makefolder(output_path)
    # Fix: use a context manager so the handle is closed even if the
    # formatting raises; the original's explicit close leaked on error,
    # and the 'a+' read capability was never used.
    with open(output_path + '/RESULT.txt', 'a') as fp:
        print(f'{query_name} {base_name} {result[0, 0]} {result[0, 1]} {result[0, 2]} {result[0, 3]}', file=fp)


if __name__ == '__main__':
    # Command-line usage (currently disabled in favour of hard-coded paths):
    # input_path = sys.argv[1]
    # output_path = sys.argv[2]
    # NOTE(review): dataset location is hard-coded to a local Windows
    # drive — restore the sys.argv handling above for portable runs.
    input_path = 'E:/datasets/satgeoloc_dataset'
    output_path = 'E:/datasets/satgeoloc_dataset'
    config = Configuration()
    config.input_path = input_path
    config.output_path = output_path
    run(config)

