import os
import torch
import json
import random
# from omni_model.omni_space import OmniBind_Base
from tqdm import tqdm
from itertools import chain

def create_batches_from_json(json_path, base_folder, batch_size=32, modality="image"):
    """
    Read a COCO-style annotations JSON and build batches for one modality.

    For ``modality="image"`` each unique ``image_id`` yields one file path
    under ``base_folder`` (COCO zero-padded naming); for ``modality="text"``
    every annotation yields its caption string.

    Args:
        json_path (str): Path to the annotations JSON file.
        base_folder (str): Root directory containing the image files.
        batch_size (int): Number of items per batch.
        modality (str): Either "image" or "text".

    Returns:
        tuple: (data_batches, id_batches) — two index-aligned lists of lists.

    Raises:
        ValueError: If ``modality`` is neither "image" nor "text".
    """
    # Load the annotations JSON.
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Flat lists of data items and their matching IDs, filled below.
    all_data = []
    all_ids = []

    if modality == "image":
        processed_image_ids = set()  # COCO repeats image_id once per caption; dedupe here
        for item in tqdm(data["annotations"], desc="Processing image data"):
            image_id = f"{item['image_id']:012d}.jpg"  # zero-pad to 12 digits (COCO file naming)
            if image_id in processed_image_ids:
                continue  # skip duplicate image_id

            # Mark as processed *before* the existence check so a missing file
            # is checked and warned about only once, not once per duplicate
            # annotation of the same image.
            processed_image_ids.add(image_id)

            image_path = os.path.join(base_folder, image_id)
            if not os.path.exists(image_path):
                print(f"Warning: Image file {image_path} does not exist.")
                continue

            # Append path and ID together so the two lists stay aligned.
            all_data.append(image_path)
            all_ids.append(item["image_id"])

    elif modality == "text":
        for item in tqdm(data["annotations"], desc="Processing text data"):
            all_data.append(item["caption"])
            all_ids.append(item["id"])  # caption id

    else:
        raise ValueError("Unsupported modality. Choose from 'image' or 'text'.")

    # Slice the flat lists into fixed-size batches; alignment is preserved
    # because both lists are sliced with identical offsets.
    batches = [all_data[i:i + batch_size] for i in range(0, len(all_data), batch_size)]
    id_batches = [all_ids[i:i + batch_size] for i in range(0, len(all_ids), batch_size)]

    return batches, id_batches

def _extract_modality_features(embed_fn, batches, modality, intermediate_dir, feature_prefix, chunk_size):
    """
    Embed every batch with ``embed_fn`` and return one concatenated feature tensor.

    When ``intermediate_dir`` is given, accumulated features are spilled to disk
    every ``chunk_size`` batches to bound peak memory; all spilled chunks are
    reloaded and reassembled before returning, so the result always covers every
    batch (previously the spilled chunks were silently dropped).

    Args:
        embed_fn (callable): Maps one batch (a list) to a feature tensor.
        batches (list): Batches to embed, in order.
        modality (str): Label used only for the progress bar.
        intermediate_dir (str or None): Directory for intermediate chunk files;
            None disables spilling entirely.
        feature_prefix (str): Filename prefix for the chunk files.
        chunk_size (int): Number of batches accumulated before a spill.

    Returns:
        torch.Tensor: Features for all batches, concatenated along dim 0.
    """
    feat_list = []
    chunk_paths = []  # spilled chunk files, in creation order
    chunk_index = 0

    if intermediate_dir:
        os.makedirs(intermediate_dir, exist_ok=True)

    def _spill():
        # Write the accumulated features to disk and reset the in-memory list.
        nonlocal chunk_index
        path = os.path.join(intermediate_dir, f"{feature_prefix}_{chunk_index}.pt")
        torch.save(torch.cat(feat_list, dim=0), path)
        print(f"Saved intermediate features to {path}")
        chunk_paths.append(path)
        feat_list.clear()  # free memory
        chunk_index += 1

    for batch in tqdm(batches, desc=f"Processing {modality} batches"):
        feat_list.append(embed_fn(batch))
        if intermediate_dir and len(feat_list) >= chunk_size:
            _spill()

    if intermediate_dir:
        if feat_list:
            _spill()  # persist the remainder as the final chunk
        # BUG FIX: reload every spilled chunk and reassemble. The original code
        # concatenated only the post-spill remainder, losing all earlier chunks.
        return torch.cat([torch.load(p) for p in chunk_paths], dim=0)

    return torch.cat(feat_list, dim=0)


def evaluate_omnibind_with_batches(model, json_path, base_folder, batch_size=32, save_features=False, feature_dir=None, chunk_size=5000):
    """
    Run image<->text retrieval with an OmniBind_Base model, batch by batch,
    optionally caching extracted features on disk.

    If ``model`` is None, features for both modalities must already be cached
    under ``feature_dir``; otherwise a ValueError is raised.

    Args:
        model (OmniBind_Base or None): Loaded model, or None to use cached features.
        json_path (str): Path to the COCO-style annotations JSON file.
        base_folder (str): Root directory containing the image files.
        batch_size (int): Number of items per batch.
        save_features (bool): Whether to persist extracted features to disk.
        feature_dir (str or None): Directory for saving/loading features.
        chunk_size (int): Batches per intermediate chunk spilled to disk.

    Returns:
        dict: Forward and backward retrieval metrics.

    Raises:
        ValueError: If features are not cached and ``model`` is None.
    """
    # Load the annotations JSON.
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Build the image_id -> [caption_id, ...] and caption_id -> image_id maps.
    image_to_caption = {}
    caption_to_image = {}
    for item in data["annotations"]:
        image_to_caption.setdefault(item["image_id"], []).append(item["id"])
        caption_to_image[item["id"]] = item["image_id"]

    # Ensure the feature cache directory exists when configured.
    if feature_dir and not os.path.exists(feature_dir):
        os.makedirs(feature_dir)

    # Cache-file locations (all None when no feature_dir is configured).
    image_feature_path = os.path.join(feature_dir, "deduplicated_image_features.pkl") if feature_dir else None
    text_feature_path = os.path.join(feature_dir, "coco_val2017_text_features.pt") if feature_dir else None
    image_ids_path = os.path.join(feature_dir, "deduplicated_image_ids.pkl") if feature_dir else None
    text_ids_path = os.path.join(feature_dir, "coco_val2017_text_ids.pt") if feature_dir else None

    # --- Image features: load from cache, or extract with the model. ---
    if feature_dir and os.path.exists(image_feature_path) and os.path.exists(image_ids_path):
        print(f"Loading image features from {image_feature_path}...")
        feat1 = torch.load(image_feature_path)
        ids1 = torch.load(image_ids_path)
    else:
        if model is None:
            # BUG FIX: the original fell through to model.emb_images and crashed
            # with AttributeError; fail with a clear message instead.
            raise ValueError("model is None but image features are not cached in feature_dir.")
        print("Creating batches for images...")
        image_batches, image_id_batches = create_batches_from_json(json_path, base_folder, batch_size, modality="image")
        # BUG FIX: create_batches_from_json returns *batched* id lists; flatten
        # them so compute_metric_ret sees one id per row of the score matrix.
        ids1 = list(chain.from_iterable(image_id_batches))
        print("Extracting features for images...")
        feat1 = _extract_modality_features(
            model.emb_images,
            image_batches,
            "image",
            # BUG FIX: os.path.join(None, ...) raised TypeError when feature_dir
            # was None; spilling is simply disabled in that case.
            os.path.join(feature_dir, "image_intermediate") if feature_dir else None,
            "image_features_chunk",
            chunk_size,
        )
        if save_features and feature_dir:
            print(f"Saving image features to {image_feature_path}...")
            torch.save(feat1, image_feature_path)
            torch.save(ids1, image_ids_path)

    # --- Text features: same load-or-extract flow. ---
    if feature_dir and os.path.exists(text_feature_path) and os.path.exists(text_ids_path):
        print(f"Loading text features from {text_feature_path}...")
        feat2 = torch.load(text_feature_path)
        ids2 = torch.load(text_ids_path)
    else:
        if model is None:
            raise ValueError("model is None but text features are not cached in feature_dir.")
        print("Creating batches for texts...")
        text_batches, text_id_batches = create_batches_from_json(json_path, base_folder, batch_size, modality="text")
        ids2 = list(chain.from_iterable(text_id_batches))  # flatten batched ids (see above)
        print("Extracting features for texts...")
        feat2 = _extract_modality_features(
            model.emb_texts,
            text_batches,
            "text",
            os.path.join(feature_dir, "text_intermediate") if feature_dir else None,
            "text_features_chunk",
            chunk_size,
        )
        if save_features and feature_dir:
            print(f"Saving text features to {text_feature_path}...")
            torch.save(feat2, text_feature_path)
            torch.save(ids2, text_ids_path)

    # Similarity matrix: rows are images, columns are captions.
    print("Computing similarity matrix...")
    score_matrix = torch.matmul(feat1, feat2.T)

    # Retrieval metrics in both directions.
    print("Computing retrieval metrics...")
    forward_metrics = compute_metric_ret(score_matrix, ids1, ids2, image_to_caption, caption_to_image, direction="forward")
    backward_metrics = compute_metric_ret(score_matrix, ids1, ids2, image_to_caption, caption_to_image, direction="backward")

    # Merge the two metric dicts (keys are disjoint by construction).
    return {**forward_metrics, **backward_metrics}


def compute_metric_ret(score_matrix, ids1, ids2, image_to_caption, caption_to_image, direction="forward"):
    """
    Compute retrieval metrics (R@1 / R@5 / R@10) from a similarity matrix.

    Args:
        score_matrix (torch.Tensor): Similarity matrix of shape (len(ids1), len(ids2)).
        ids1 (list): IDs of the first modality (rows; image IDs).
        ids2 (list): IDs of the second modality (columns; caption IDs).
        image_to_caption (dict): image_id -> list of matching caption_ids.
        caption_to_image (dict): caption_id -> matching image_id.
        direction (str): "forward" (modality 1 -> 2) or "backward" (2 -> 1).

    Returns:
        dict: {"<direction>_r1", "<direction>_r5", "<direction>_r10"} as
        percentages rounded to one decimal place.
    """
    assert score_matrix.shape == (len(ids1), len(ids2))

    if direction == "forward":  # modality 1 -> modality 2
        indice_matrix = score_matrix.sort(dim=-1, descending=True)[1].tolist()
        # PERF FIX: the original called ids2.index() / list.index() inside the
        # loop, giving O(n^2)-O(n^3) total work. Build position maps once.
        # setdefault keeps the *first* occurrence, matching list.index semantics.
        col_of_id = {}
        for j, caption_id in enumerate(ids2):
            col_of_id.setdefault(caption_id, j)
        rank = []
        for i, image_id in enumerate(ids1):
            if image_id in image_to_caption:
                gt_cols = [col_of_id[c] for c in image_to_caption[image_id] if c in col_of_id]
                # Rank of each column within this row's descending sort order.
                rank_of_col = {col: r for r, col in enumerate(indice_matrix[i])}
                rank.append(min(rank_of_col[c] for c in gt_cols))

        rank = torch.tensor(rank).to(score_matrix.device)
        # Denominators intentionally use the full ID counts, matching the
        # original metric definition (unmatched queries count as misses).
        r1 = (rank < 1).sum().item() / len(ids1)
        r5 = (rank < 5).sum().item() / len(ids1)
        r10 = (rank < 10).sum().item() / len(ids1)

        metrics = {
            "forward_r1": round(r1 * 100, 1),
            "forward_r5": round(r5 * 100, 1),
            "forward_r10": round(r10 * 100, 1),
        }

    else:  # modality 2 -> modality 1
        indice_matrix = score_matrix.sort(dim=0, descending=True)[1].permute(1, 0).tolist()
        row_of_id = {}
        for j, image_id in enumerate(ids1):
            row_of_id.setdefault(image_id, j)  # first occurrence, like list.index
        rank = []
        for i, caption_id in enumerate(ids2):
            if caption_id in caption_to_image:
                image_id = caption_to_image[caption_id]
                if image_id in row_of_id:
                    rank_of_row = {row: r for r, row in enumerate(indice_matrix[i])}
                    rank.append(rank_of_row[row_of_id[image_id]])

        rank = torch.tensor(rank).to(score_matrix.device)
        r1 = (rank < 1).sum().item() / len(ids2)
        r5 = (rank < 5).sum().item() / len(ids2)
        r10 = (rank < 10).sum().item() / len(ids2)

        metrics = {
            "backward_r1": round(r1 * 100, 1),
            "backward_r5": round(r5 * 100, 1),
            "backward_r10": round(r10 * 100, 1),
        }

    return metrics


# Example usage
if __name__ == "__main__":
    # Model loading is skipped: passing None makes the evaluation rely on
    # features already cached under feature_dir.
    # model = OmniBind_Base(pretrained=True).cuda().eval()

    json_path = "/data/jzw/MSCOCO/annotations/captions_val2017.json"  # annotations JSON
    base_folder = "/data/jzw/MSCOCO/val2017"  # image root (image modality)
    batch_size = 32  # items per batch
    feature_dir = "/data/jzw/MSCOCO/features"  # feature cache directory

    # Run retrieval between the image and text modalities.
    metrics = evaluate_omnibind_with_batches(
        None,
        json_path=json_path,
        base_folder=base_folder,
        batch_size=batch_size,
        save_features=True,
        feature_dir=feature_dir,
    )

    # Report the results.
    print("Retrieval Metrics:")
    for k, v in metrics.items():
        print(f"{k}: {v}")