import os
import torch
import json
import random
from omni_model.omni_space import OmniBind_Base
from tqdm import tqdm

def create_batches_from_json(json_path, base_folder, batch_size=32, modality="image", audio_folder=None):
    """
    Read samples described by a JSON file and group them into batches.

    For "image" one random frame is picked per video folder, for "audio"
    the matching "<video_id>.wav" path is used, for "text" the raw
    description string is used.

    Args:
        json_path (str): Path to the JSON file. Assumed to be a list of
            dicts carrying "video_id" and "desc" keys — TODO confirm schema.
        base_folder (str): Root folder containing one frame sub-folder per
            "video_id" (image modality).
        batch_size (int): Number of items per batch.
        modality (str): One of "image", "audio", "text".
        audio_folder (str): Folder containing "<video_id>.wav" files
            (required for the audio modality).

    Returns:
        list: List of batches; each batch is a list of paths or texts.

    Raises:
        ValueError: If modality is unsupported, or audio_folder is None
            for the audio modality.
    """
    # Load the JSON annotation file
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Flat list of selected items; split into batches at the end
    all_data = []

    if modality == "image":
        for item in tqdm(data, desc="Processing image folders"):
            folder_path = os.path.join(base_folder, item["video_id"])

            if not os.path.exists(folder_path):
                print(f"Warning: Folder {folder_path} does not exist.")
                continue

            # Collect all frames in the folder
            images = [os.path.join(folder_path, img)
                      for img in os.listdir(folder_path)
                      if img.endswith(('.jpg', '.png'))]

            # random.choice raises IndexError on an empty sequence, so skip
            # folders that contain no usable frames.
            if not images:
                print(f"Warning: No images found in {folder_path}.")
                continue

            # Pick one random frame to represent the video
            all_data.append(random.choice(images))

    elif modality == "audio":
        # Validate once up front instead of on every loop iteration
        if audio_folder is None:
            raise ValueError("audio_folder must be specified for audio modality.")

        for item in tqdm(data, desc="Processing audio files"):
            audio_path = os.path.join(audio_folder, f"{item['video_id']}.wav")

            if not os.path.exists(audio_path):
                print(f"Warning: Audio file {audio_path} does not exist.")
                continue

            all_data.append(audio_path)

    elif modality == "text":
        for item in tqdm(data, desc="Processing text data"):
            all_data.append(item["desc"])

    else:
        raise ValueError("Unsupported modality. Choose from 'image', 'audio', or 'text'.")

    # Split the flat list into fixed-size batches (last batch may be shorter)
    return [all_data[i:i + batch_size] for i in range(0, len(all_data), batch_size)]

def evaluate_omnibind_with_batches(model, modality1, modality2, json_path, base_folder, audio_folder, batch_size=32):
    """
    Run cross-modal retrieval between two modalities with an OmniBind_Base
    model, processing inputs in batches.

    Args:
        model (OmniBind_Base): Loaded model exposing emb_audios /
            emb_images / emb_texts batch encoders.
        modality1 (str): First modality ("audio", "image" or "text").
        modality2 (str): Second modality ("audio", "image" or "text").
        json_path (str): Path to the annotation JSON file.
        base_folder (str): Root folder with per-video frame folders.
        audio_folder (str): Folder with per-video audio files.
        batch_size (int): Number of items per batch.

    Returns:
        dict: Forward and backward retrieval metrics (R@1/5/10).

    Raises:
        ValueError: If a modality is unsupported or the two modalities
            yield different numbers of batches.
    """
    print(f"Creating batches for {modality1}...")
    batches1 = create_batches_from_json(json_path, base_folder, batch_size, modality1, audio_folder)

    print(f"Creating batches for {modality2}...")
    batches2 = create_batches_from_json(json_path, base_folder, batch_size, modality2, audio_folder)

    # NOTE(review): create_batches_from_json silently skips missing files,
    # so items can desync between the two modalities even when the batch
    # counts agree — this check only catches the coarse case. Verify the
    # dataset is complete before trusting the metrics.
    if len(batches1) != len(batches2):
        raise ValueError(
            f"Batch count mismatch: {len(batches1)} ({modality1}) vs "
            f"{len(batches2)} ({modality2})."
        )

    print(f"Extracting features for {modality1}...")
    feat1 = _embed_batches(model, modality1, batches1)

    print(f"Extracting features for {modality2}...")
    feat2 = _embed_batches(model, modality2, batches2)

    # Pairwise similarity between every modality-1 and modality-2 item
    print("Computing similarity matrix...")
    score_matrix = torch.matmul(feat1, feat2.T)

    # Item i of modality1 corresponds to item i of modality2, so plain
    # positional indices serve as the ground-truth pairing.
    ids1 = list(range(len(feat1)))
    ids2 = list(range(len(feat2)))

    print("Computing retrieval metrics...")
    forward_metrics = compute_metric_ret(score_matrix, ids1, ids2, direction="forward")
    backward_metrics = compute_metric_ret(score_matrix, ids1, ids2, direction="backward")

    # Merge both directions into a single result dict
    return {**forward_metrics, **backward_metrics}


def _embed_batches(model, modality, batches):
    """Encode every batch with the model's encoder for *modality* and
    concatenate the per-batch features into one tensor."""
    encoders = {
        "audio": model.emb_audios,
        "image": model.emb_images,
        "text": model.emb_texts,
    }
    if modality not in encoders:
        raise ValueError(f"Unsupported modality: {modality}")
    encode = encoders[modality]
    feats = [encode(batch) for batch in tqdm(batches, desc=f"Processing {modality} batches")]
    return torch.cat(feats, dim=0)


def compute_metric_ret(score_matrix, ids1, ids2, direction="forward"):
    """
    Compute retrieval recall metrics (R@1, R@5, R@10) from a similarity matrix.

    Args:
        score_matrix (torch.Tensor): Similarity matrix of shape
            (len(ids1), len(ids2)).
        ids1 (list): IDs of the first (row) modality.
        ids2 (list): IDs of the second (column) modality.
        direction (str): "forward" ranks modality-2 items for each
            modality-1 query; any other value computes the backward
            direction (modality 2 -> modality 1).

    Returns:
        dict: Keys "<direction>_r1" / "_r5" / "_r10" mapped to recall
            percentages rounded to one decimal place.
    """
    assert score_matrix.shape == (len(ids1), len(ids2))

    if direction == "forward":  # modality 1 -> modality 2
        queries, gallery = ids1, ids2
        scores = score_matrix
        prefix = "forward"
    else:  # modality 2 -> modality 1; transposing the matrix is equivalent
        # to the column-wise sort + permute of the original formulation.
        queries, gallery = ids2, ids1
        scores = score_matrix.T
        prefix = "backward"

    # Map each gallery ID to its first column position once (O(1) lookups)
    # instead of calling list.index inside the loop (O(n) per query).
    # setdefault keeps the FIRST occurrence, matching list.index semantics.
    gallery_pos = {}
    for col, gid in enumerate(gallery):
        gallery_pos.setdefault(gid, col)

    # For each query row, the rank of its ground-truth item is that item's
    # position in the descending sort of the row's scores.
    sorted_cols = scores.sort(dim=-1, descending=True)[1].tolist()
    ranks = [sorted_cols[row].index(gallery_pos[qid])
             for row, qid in enumerate(queries)]

    ranks = torch.tensor(ranks)
    total = len(queries)
    return {
        f"{prefix}_r{k}": round((ranks < k).sum().item() / total * 100, 1)
        for k in (1, 5, 10)
    }


# Example usage
if __name__ == "__main__":
    # Load the pretrained model on GPU in eval mode
    model = OmniBind_Base(pretrained=True).cuda().eval()

    # Path to the annotation JSON file
    json_path = "/data/jzw/valor-32k-annotations/desc_val_filtered.json"

    # Root folder with extracted video frames (image modality)
    base_folder = "/data/jzw/processed_valor32k_val/frames"

    # Folder with audio files (audio modality)
    audio_folder = "/data/jzw/processed_valor32k_val/audio"

    # Number of items per batch
    batch_size = 32

    # Run retrieval between the image and text modalities
    metrics = evaluate_omnibind_with_batches(
        model,
        modality1="image",
        modality2="text",
        json_path=json_path,
        base_folder=base_folder,
        audio_folder=audio_folder,
        batch_size=batch_size,
    )



    # Print the results
    print("Retrieval Metrics:")
    for k, v in metrics.items():
        print(f"{k}: {v}")