import os
import random
from dataclasses import dataclass, field
from glob import glob
from time import time

import cv2
import faiss
import h5py as h5
import numpy as np
import torch
from tqdm import tqdm

from process_image.utils.superpoint import SuperPoint


def numpy_image_to_torch(image: np.ndarray) -> torch.Tensor:
    """Scale pixel values to [0, 1] and reorder dimensions to CxHxW."""
    if image.ndim == 2:
        # Grayscale: prepend a channel axis -> 1xHxW.
        chw = image[None]
    elif image.ndim == 3:
        # Color: HxWxC -> CxHxW.
        chw = image.transpose((2, 0, 1))
    else:
        raise ValueError(f"Not an image: {image.shape}")
    return torch.tensor(chw / 255.0, dtype=torch.float)


def extract_features(path, device, num_pts=None):
    """Run SuperPoint on the image at `path` and return its features.

    Parameters
    ----------
    path : str
        Path to an image readable by OpenCV.
    device : str or torch.device
        Device used for both the image tensor and the extractor.
    num_pts : int, optional
        Upper bound on keypoint count (None = extractor default).

    Raises
    ------
    FileNotFoundError
        If OpenCV cannot read the image (imread returns None).
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img is None:
        raise FileNotFoundError(f"Cannot read image: {path}")
    # BGR -> RGB before converting to a torch tensor.
    img_tensor = numpy_image_to_torch(img[..., ::-1]).to(device)
    # Bug fix: the extractor previously called .cuda() unconditionally,
    # ignoring the `device` argument the image tensor was moved to.
    extractor = SuperPoint(max_num_keypoints=num_pts).eval().to(device)
    features = extractor.extract(img_tensor)
    del img, img_tensor
    return features


def read_file(path):
    """Stack the 'descriptors' datasets of every image in an HDF5 file.

    Parameters
    ----------
    path : str
        Path to an HDF5 file whose top-level keys each hold a
        'descriptors' dataset.

    Returns
    -------
    np.ndarray
        All descriptors concatenated along axis 0.
    """
    result = []
    # Bug fix: open read-only ('r'). Mode 'a' requires write access and
    # silently creates an empty file when the path does not exist.
    with h5.File(path, 'r') as f:
        for img_name in f.keys():
            result.append(np.array(f[img_name]['descriptors']))
    # (The previous stray f.close() after the `with` block was redundant.)
    return np.concatenate(result, axis=0)


def retrieval(query_features, reference_features):
    """Mean top-1 inner-product similarity between two descriptor sets.

    The smaller set is always used as the query side, so the FAISS index
    is built over the larger set.
    """
    # Keep the larger set on the index side.
    if query_features.shape[0] > reference_features.shape[0]:
        query_features, reference_features = reference_features, query_features

    dim = reference_features.shape[1]
    # Inner-product index (cosine-style similarity) instead of an
    # L2-distance index.
    index = faiss.IndexFlatIP(dim)
    index.add(reference_features)
    del reference_features

    similarities, _ = index.search(query_features, 1)
    return np.mean(similarities)


def retrieval_fine_grained(query_feature, path):
    """Find the database tile most similar to the query (block-wise search; unused).

    Searches each stored image's descriptors independently and returns the
    corner coordinates encoded in the best image's key name
    (underscore-separated integers).

    Parameters
    ----------
    query_feature : np.ndarray
        (n, d) query descriptors.
    path : str
        HDF5 file whose top-level keys each hold a 'descriptors' dataset.

    Returns
    -------
    list[int]
        Coordinates parsed from the best-matching key.
    """
    embed_size = query_feature.shape[1]
    # Inner-product (cosine-style) similarity, not L2 distance.
    faiss_index = faiss.IndexFlatIP(embed_size)

    # Bug fix: open read-only; 'a' would create/lock the file for writing.
    with h5.File(path, 'r') as f:
        result = []
        img_list = list(f.keys())
        for img_name in img_list:
            descriptors = np.array(f[img_name]['descriptors'])
            # Bug fix: reset the index each iteration. Previously descriptors
            # accumulated across images, so every later search also matched
            # against earlier images' descriptors, corrupting per-image scores.
            faiss_index.reset()
            faiss_index.add(descriptors)
            distance, _ = faiss_index.search(query_feature, 1)
            result.append(np.mean(distance))
            del descriptors
        img_corners = img_list[result.index(max(result))]
    return [int(coord) for coord in img_corners.split('_')]


def read_zoom_extract(path, device, num_pts=None):
    """Load an image, downscale very large ones, and extract SuperPoint features.

    Parameters
    ----------
    path : str
        Path to an image readable by OpenCV.
    device : str or torch.device
        Device used for both the image tensor and the extractor.
    num_pts : int, optional
        Upper bound on keypoint count (None = extractor default).

    Returns
    -------
    tuple
        (features, rate) where `rate` is the average downscale factor
        applied (1 when the image was used at full resolution).

    Raises
    ------
    FileNotFoundError
        If OpenCV cannot read the image (imread returns None).
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img is None:
        raise FileNotFoundError(f"Cannot read image: {path}")
    h, w = img.shape[:2]
    if (h > 2000) and (w > 2000):
        # Force 1000x1000 (aspect ratio is NOT preserved); `rate` is the
        # mean of the per-axis scale factors.
        img = cv2.resize(img, (1000, 1000), interpolation=cv2.INTER_LINEAR)
        rate = ((h / 1000) + (w / 1000)) / 2
    else:
        rate = 1
    # BGR -> RGB before tensor conversion.
    img_tensor = numpy_image_to_torch(img[..., ::-1]).to(device)
    # Bug fix: match the extractor's device to the `device` argument
    # instead of unconditionally calling .cuda().
    extractor = SuperPoint(max_num_keypoints=num_pts).eval().to(device)
    features = extractor.extract(img_tensor)
    del img, img_tensor
    return features, rate
    

def cal_dis(point, points_list):
    """Rank boxes by center distance to a reference box (nearest first).

    Both arguments hold (x_min, y_min, x_max, y_max) boxes: `point` is a
    (1, 4) array, `points_list` is (n, 4). Returns the argsort indices of
    the center-to-center Euclidean distances, ascending.
    """
    def _centers(boxes):
        # Box center = midpoint of the two corner coordinates.
        cx = (boxes[:, 0] + boxes[:, 2]) / 2
        cy = (boxes[:, 1] + boxes[:, 3]) / 2
        return np.column_stack((cx, cy))

    gaps = _centers(points_list) - _centers(point)
    return np.linalg.norm(gaps, axis=1).argsort()


def search_infile(name, h5_file):
    """Sort the HDF5 keys sharing `name`'s image prefix by spatial proximity.

    `name` looks like 'H49D001012_0_0_1000_1000' (image id followed by box
    corner coordinates). Returns the keys of `h5_file` whose names start
    with the same image id, ordered nearest-first to `name`'s box center.
    """
    parts = name.split('_')
    img_name = parts[0]
    img_coord = np.array([int(c) for c in parts[1:]]).reshape(1, 4)

    refer_name = [key for key in h5_file.keys() if key.startswith(img_name)]

    boxes = []
    for item in refer_name:
        # Each key encodes exactly: id, x, y, x_end, y_end.
        _, x0, y0, x1, y1 = item.split('_')
        boxes.append([int(x0), int(y0), int(x1), int(y1)])

    order = cal_dis(img_coord, np.array(boxes))
    return [refer_name[i] for i in order]


def run(data_path, query_features):
    """Rank database images by descriptor similarity to the query.

    Parameters
    ----------
    data_path : str
        HDF5 file whose top-level keys each hold a 'descriptors' dataset.
    query_features : np.ndarray
        (n, d) query descriptors.

    Returns
    -------
    list
        Image names sorted by descending mean top-1 similarity
        (best match first).
    """
    dis_list = []
    # Bug fix: open read-only ('r'); mode 'a' needs write access and would
    # create an empty file for a bad path. The stray f.close() after the
    # `with` block was also removed.
    with h5.File(data_path, 'r') as f:
        img_list = list(f.keys())
        for img_name in img_list:
            reference_features = np.array(f[img_name]['descriptors'])
            dis_list.append(retrieval(query_features, reference_features))
            del reference_features

    # Higher similarity first.
    return list(np.array(img_list)[np.argsort(dis_list)[::-1]])


def calculate_ious(gt_box, pred_boxes):
    """Compute IoU between one ground-truth box and n predicted boxes.

    Parameters
    ----------
    gt_box : np.ndarray
        Shape (1, 4): (xmin, ymin, xmax, ymax) pixel coordinates.
    pred_boxes : np.ndarray
        Shape (n, 4): n predicted boxes as (xmin, ymin, xmax, ymax).

    Returns
    -------
    np.ndarray
        Length-n array of IoU values; 0 where the union area is 0.
    """
    xmin_gt, ymin_gt, xmax_gt, ymax_gt = gt_box.squeeze()
    xmin_pred, ymin_pred, xmax_pred, ymax_pred = pred_boxes.T

    # Intersection rectangle, clamped to zero size when boxes don't overlap.
    inter_w = np.minimum(xmax_gt, xmax_pred) - np.maximum(xmin_gt, xmin_pred)
    inter_h = np.minimum(ymax_gt, ymax_pred) - np.maximum(ymin_gt, ymin_pred)
    inter_area = np.maximum(0, inter_w) * np.maximum(0, inter_h)

    area_gt = (xmax_gt - xmin_gt) * (ymax_gt - ymin_gt)
    area_pred = (xmax_pred - xmin_pred) * (ymax_pred - ymin_pred)
    union_area = area_gt + area_pred - inter_area

    # Bug fix: divide only where the union is non-zero. The original code
    # divided unconditionally and patched the result with np.where
    # afterwards, which still triggered divide-by-zero warnings and
    # produced NaN/inf intermediates.
    iou = np.zeros(union_area.shape, dtype=float)
    np.divide(inter_area, union_area, out=iou, where=union_area != 0)
    return iou


# def run(config, query_features):
#     if config.rate != 1:
#         res = [zoom for zoom in config.zoom_list if zoom > rate / 2]
#         data_list = [file for i in res for file in glob(f'{config.input_path}/database3/*{i}.h5')]
#     else:
#         data_list = glob(f'{config.input_path}/database3/*.h5')

#     dis_list = []
#     for data_path in data_list:
#         reference_features = read_file(data_path)
#         distance = retrieval(query_features, reference_features)
#         dis_list.append(distance)
#         del reference_features
#     # reference_path = data_list[dis_list.index(max(dis_list))]
#     reference_path = list(np.array(data_list)[np.argsort(dis_list)[::-1][:config.top_n]])
#     return reference_path


@dataclass
class Configuration:
    """Runtime settings for the retrieval benchmark."""

    # Bug fix: under @dataclass, unannotated attributes are plain class
    # attributes, not fields — and a bare class-level list is shared by all
    # instances. Annotate everything and use default_factory for the list.
    zoom_list: list = field(default_factory=lambda: [16, 8, 4, 2])
    device: str = "cuda"
    # Root folder containing test_image/ and database2/.
    input_path: str = r'D:\Temp'
    # Max keypoints requested from SuperPoint.
    num_pts: int = 500


if __name__ == '__main__':

    # name= 'H49D001012_3000_3000_4000_4000'
    # h5_file= h5.File(r'D:\Temp\database2/4.h5', 'a')
    # re = search_infile(name, h5_file)

    config = Configuration()
    # Deterministically sample 20 query images (fixed seed for repeatability).
    query_list = os.listdir(f'{config.input_path}/test_image')
    random.seed(3407)
    random.shuffle(query_list)
    query_list = query_list[:20]

    # Per-zoom list of best-IoU ranks, one entry per query.
    rank = {}
    for z in config.zoom_list:
        rank[z] = []

    for query_name in query_list:
        query_path = f'{config.input_path}/test_image/{query_name}'

        # Ground-truth box is encoded in the filename as the last four
        # underscore-separated integers; [:-4] strips the file extension
        # (assumes a 3-character extension such as .png — TODO confirm).
        qqq = query_name.split('_')
        gt = np.array([int(qqq[-4]), int(qqq[-3]), int(qqq[-2]), int(qqq[-1][:-4])])
        print(f'No {query_list.index(query_name)}: {query_name}')

        img_features, rate = read_zoom_extract(query_path, config.device)
        # Only search zoom levels coarse enough for the query's downscale rate.
        zooms = [zoom for zoom in config.zoom_list if zoom > rate / 2]

        descriptors = np.squeeze(img_features['descriptors'].detach().cpu().numpy())
        keypoint_scores = np.squeeze(img_features['keypoint_scores'].detach().cpu().numpy())

        for zoom in zooms:
            
            # One HDF5 database per zoom level; rank its tiles by similarity.
            data_path = f'{config.input_path}/database2/{zoom}.h5'
            st_time = time()
            reference_path = run(data_path, descriptors)
            need_time = time() - st_time

            # Parse the predicted box from each retrieved key; retrievals
            # from a different base image count as a zero box (IoU 0).
            pred = []
            for ref in reference_path:
                if ref.upper().startswith(qqq[0].upper()):
                    rrr = ref.split('_')
                    pred.append(np.array([int(rrr[-4]), int(rrr[-3]), int(rrr[-2]), int(rrr[-1])]))
                else:
                    pred.append(np.array([0, 0, 0, 0]))
            iou = calculate_ious(gt, np.array(pred))
            # Rank = retrieval position of the best-IoU prediction (0 = best).
            print(f'Zoom: {zoom}, Rank: {np.argsort(iou)[::-1][0]}, IOU: {iou[np.argsort(iou)[::-1][0]]}, Time: {need_time}')
            rank[zoom].append(np.argsort(iou)[::-1][0])

    # Report the mean best-IoU rank per zoom level across all queries.
    for key, value in rank.items():
        print(f'Zoom: {key}, Rank: {np.mean(np.array(value))}')

            # base_img = [os.path.basename(path).split('_')[0] for path in reference_path]
            # print(need_time)
