import os.path
from glob import glob
from os.path import join
import h5py
import glob

import albumentations as A
import cv2
import faiss
import numpy as np
import torch
import torch.utils.data as data
from albumentations.pytorch import ToTensorV2
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from configuration import Configuration

from model import TimmModel


def get_transform(img_size):
    """Build the evaluation preprocessing pipeline.

    Resizes to a square of side *img_size*, applies ImageNet mean/std
    normalization, and converts the result to a CHW torch tensor.
    """
    return A.Compose(
        [
            A.Resize(img_size, img_size, interpolation=cv2.INTER_LINEAR_EXACT, p=1.0),
            A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ToTensorV2(),
        ]
    )

class QueryDataset(data.Dataset):
    """Dataset over the query (test) images.

    Lists every file under ``<input_path>/test_image`` and, per item,
    loads the image, converts BGR->RGB, and applies the eval transform.
    """

    def __init__(self, config) -> None:
        super().__init__()

        self.img_transform = get_transform(config.img_size)
        # All files directly under the test_image directory.
        self.img_list = glob.glob(join(config.input_path, 'test_image', "*"), recursive=True)

    def __getitem__(self, index):
        """Return (transformed image tensor, index)."""
        img_path = self.img_list[index]
        img = cv2.imread(img_path)
        if img is None:
            # cv2.imread returns None silently for missing/unreadable files;
            # fail loudly here instead of crashing later inside cvtColor.
            raise FileNotFoundError(f"Failed to read image: {img_path}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Albumentations takes/returns a dict keyed by 'image'.
        img = self.img_transform(image=img)['image']
        return img, index

    def __len__(self):
        return len(self.img_list)


def load_reference_features(h5py_path):
    """Load one HDF5 shard of reference features.

    The file stores byte-string image names under 'keys' and the matching
    feature vectors under 'values'. Returns a dict name -> feature vector.
    """
    with h5py.File(h5py_path, 'r') as hf:
        names = [raw.decode('utf-8') for raw in hf['keys'][:]]
        feats = hf['values'][:]
    return dict(zip(names, feats))

def load_model(config):
    """Construct the TimmModel and optionally warm-start it from a checkpoint.

    Loading uses strict=False, so checkpoints with extra or missing keys
    still load partially.
    """
    model = TimmModel(
        model_name=config.model, pretrained=config.pretrained, img_size=config.img_size,
        backbone_arch=config.backbone_arch, agg_arch=config.agg_arch, agg_config=config.agg_config,
        layer1=config.layer1,
    )

    if config.checkpoint_start is not None:
        # map_location='cpu' lets GPU-saved checkpoints load on CPU-only hosts;
        # the caller moves the model to config.device afterwards.
        model_state_dict = torch.load(config.checkpoint_start, map_location='cpu')
        model.load_state_dict(model_state_dict, strict=False)
        del model_state_dict  # free the state dict before the model moves to GPU

    return model


def evaluate(model, config):
    """Extract features for every query image, then retrieve the best-matching
    reference crops from HDF5 feature shards.

    Returns (query_list, best_predictions): query_list is the list of query
    image paths, and best_predictions[i] holds the names of the top
    config.crop_top_k reference crops for query i, closest first.
    """
    query_dataset = QueryDataset(config)
    query_dataloader = DataLoader(
        query_dataset, batch_size=config.batch_size_eval, num_workers=config.num_workers, shuffle=False,
        pin_memory=False,
    )
    model.eval()
    query_feature_list = []
    with torch.no_grad():
        for img, _ in tqdm(query_dataloader, total=len(query_dataloader)):
            with torch.cuda.amp.autocast():
                img = img.to(config.device)
                img_feature = model(img)
            # Cast back to float32: autocast may have produced float16 features.
            query_feature_list.append(img_feature.to(torch.float32))
        query_feature = torch.cat(query_feature_list, dim=0)

    del model
    # Per-shard retrieval results, merged after the loop.
    all_distances = []
    all_predictions = []

    # All HDF5 shard paths holding reference features.
    h5py_path_list = sorted(glob.glob(os.path.join(config.h5py_base_path, '*.h5')))
    for h5py_path in h5py_path_list:
        # Load only one HDF5 shard at a time to bound memory use.
        sim_dict = load_reference_features(h5py_path)
        reference_list = np.array(list(sim_dict.keys()))
        reference_feature = np.vstack(list(sim_dict.values()))

        embed_size = reference_feature.shape[1]
        faiss_index = faiss.IndexFlatL2(embed_size)
        faiss_index.add(reference_feature)
        del sim_dict
        del reference_feature

        # Top-5 nearest neighbours (L2) within this shard for every query.
        distance, predictions = faiss_index.search(query_feature.cpu().numpy(), 5)

        # Map row indices back to reference image names.
        predictions_name = reference_list[predictions]

        # Accumulate this shard's candidates.
        all_distances.append(distance)
        all_predictions.append(predictions_name)

    # Merge candidates from every shard along the candidate axis.
    all_distances = np.concatenate(all_distances, axis=1)
    all_predictions = np.concatenate(all_predictions, axis=1)

    # Globally re-rank the merged candidates; keep the crop_top_k closest per query.
    best_distance_idx = np.argsort(all_distances, axis=1)[:, :config.crop_top_k]
    best_predictions = all_predictions[np.arange(len(best_distance_idx))[:, None], best_distance_idx]

    query_list = query_dataset.img_list

    return query_list, best_predictions

def crop_base_img(queried_image, config):
    """Cut the region encoded in *queried_image*'s filename out of its base map.

    The filename stem is expected to end in four coordinate fields:
    '<base_name>_<h_lt>_<w_lt>_<h_rb>_<w_rb>' (top-left / bottom-right pixel
    coordinates). The crop is written into config.al_crop_folder under the
    same filename.
    """
    stem = os.path.splitext(queried_image)[0]
    # rsplit with maxsplit=4 keeps underscores inside the base-map name intact;
    # only the last four fields are coordinates.
    img_name, h_lt, w_lt, h_rb, w_rb = stem.rsplit('_', 4)
    base_img = cv2.imread(f'{config.input_path}/base_map/{img_name}.TIF')
    # +1 makes the bottom-right coordinate inclusive.
    crop_img = base_img[int(h_lt):int(h_rb)+1, int(w_lt):int(w_rb)+1]
    cv2.imwrite(f'{config.al_crop_folder}/{queried_image}', crop_img)


def image_retrieval(config):
    """Run retrieval for every query image and crop the matched base-map regions.

    Returns an object ndarray whose rows are
    [query_name, top1_reference_name, ..., topK_reference_name].
    """
    model = load_model(config).to(config.device)
    query_list, predictions = evaluate(model, config)

    query_array = np.empty((len(query_list), config.crop_top_k + 1), dtype=object)
    os.makedirs(config.al_crop_folder, exist_ok=True)

    for row, (query_path, reference_names) in enumerate(zip(query_list, predictions)):
        # Column 0 holds the query's bare filename.
        query_array[row, 0] = os.path.basename(query_path)
        for col, reference_name in enumerate(reference_names, start=1):
            # Side effect: write the matched crop to disk.
            crop_base_img(reference_name, config)
            query_array[row, col] = str(reference_name)
    return query_array

if __name__ == '__main__':
    # Hard-coded dataset root for local runs.
    dataset_root = r'E:/datasets/satgeoloc_dataset'

    config = Configuration()
    config.input_path = dataset_root

    retrieval_table = image_retrieval(config)
    print(retrieval_table)
