import torch
from omni_model.omni_space import OmniBind_Base
import os
import json
import random
from tqdm import tqdm

def create_batches_from_json(json_path, base_folder, batch_size=32, modality="image", audio_folder=None):
    """
    Read dataset entries from a JSON file and build batches of file paths
    (image/audio) or raw texts.

    Args:
        json_path (str): Path to the JSON file; each element describes one video.
        base_folder (str): Root folder containing one frame directory per video
            (used for the "image" modality).
        batch_size (int): Number of items per batch.
        modality (str): One of "image", "audio", "text".
        audio_folder (str): Folder containing <video_id>.wav files
            (required for the "audio" modality).

    Returns:
        list: List of batches; each batch is a list of paths or texts
            (the last batch may be shorter than ``batch_size``).

    Raises:
        ValueError: If ``modality`` is unsupported, or modality is "audio"
            and ``audio_folder`` was not given.
    """
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    all_data = []

    if modality == "image":
        for item in tqdm(data, desc="Processing image folders"):
            # "video_id" names the per-video frame directory.
            folder_path = os.path.join(base_folder, item["video_id"])

            if not os.path.exists(folder_path):
                print(f"Warning: Folder {folder_path} does not exist.")
                continue

            images = [os.path.join(folder_path, img)
                      for img in os.listdir(folder_path)
                      if img.endswith(('.jpg', '.png'))]
            if not images:
                # random.choice on an empty list raises IndexError; skip
                # empty folders the same way missing folders are skipped.
                print(f"Warning: Folder {folder_path} contains no images.")
                continue

            # Represent the video by one randomly sampled frame.
            all_data.append(random.choice(images))

    elif modality == "audio":
        # Fail fast instead of raising midway through the loop.
        if audio_folder is None:
            raise ValueError("audio_folder must be specified for audio modality.")

        for item in tqdm(data, desc="Processing audio files"):
            # Audio files are stored as <video_id>.wav.
            audio_path = os.path.join(audio_folder, f"{item['video_id']}.wav")

            if not os.path.exists(audio_path):
                print(f"Warning: Audio file {audio_path} does not exist.")
                continue

            all_data.append(audio_path)

    elif modality == "text":
        for item in tqdm(data, desc="Processing text data"):
            # "desc" holds the caption/description text.
            all_data.append(item["desc"])

    else:
        raise ValueError("Unsupported modality. Choose from 'image', 'audio', or 'text'.")

    # Chunk into consecutive batches; the final batch may be smaller.
    return [all_data[i:i + batch_size] for i in range(0, len(all_data), batch_size)]

def evaluate_with_intermediate_modality(
    model: OmniBind_Base,
    primary_modality1: str,
    primary_modality2: str,
    intermediate_modality: str,
    json_path: str,
    base_folder: str,
    audio_folder: str,
    batch_size: int = 32
) -> dict:
    """
    Evaluate retrieval from ``primary_modality1`` to ``primary_modality2``
    by chaining similarities through an intermediate modality.

    The final score is the path-sum over intermediate items k:
    ``S[i, j] = sum_k sim(p1_i, mid_k) * sim(mid_k, p2_j)``, with the first
    similarity matrix row-normalized and the second column-normalized.

    Args:
        model: OmniBind model exposing emb_audios / emb_images / emb_texts.
        primary_modality1: Query modality ("audio", "image" or "text").
        primary_modality2: Gallery modality.
        intermediate_modality: Bridging modality.
        json_path: Annotation JSON path (one entry per video).
        base_folder: Root folder with per-video frame directories.
        audio_folder: Folder with <video_id>.wav files.
        batch_size: Embedding batch size.

    Returns:
        dict: Forward retrieval metrics (R@1 / R@5 / R@10, in percent).
    """

    def _embed(batches, modality):
        # Encode every batch with the encoder matching `modality` and
        # concatenate into one (N, D) feature tensor.
        if modality == "audio":
            encoder = model.emb_audios
        elif modality == "image":
            encoder = model.emb_images
        else:
            encoder = model.emb_texts
        return torch.cat([encoder(batch) for batch in batches], dim=0)

    print(f"Creating batches for {primary_modality1} & {intermediate_modality}...")
    batches1 = create_batches_from_json(json_path, base_folder, batch_size, primary_modality1, audio_folder)
    batches_mid = create_batches_from_json(json_path, base_folder, batch_size, intermediate_modality, audio_folder)

    print(f"Creating batches for {intermediate_modality} & {primary_modality2}...")
    batches2 = create_batches_from_json(json_path, base_folder, batch_size, primary_modality2, audio_folder)

    print(f"Extracting {primary_modality1} features...")
    feat1 = _embed(batches1, primary_modality1)

    print(f"Extracting {intermediate_modality} features...")
    feat_mid = _embed(batches_mid, intermediate_modality)

    print(f"Extracting {primary_modality2} features...")
    feat2 = _embed(batches2, primary_modality2)

    # NOTE(review): the ground-truth pairing below assumes all three
    # modalities kept every JSON entry in the same order; missing files are
    # silently skipped inside create_batches_from_json, which would break
    # the index alignment — verify counts match on real data.

    print(f"Computing {primary_modality1} -> {intermediate_modality} similarity matrix...")
    feat1_mid = feat1 @ feat_mid.T                                        # (N1, Nmid)
    feat1_mid = feat1_mid / (feat1_mid.norm(dim=1, keepdim=True) + 1e-8)  # row-normalize

    print(f"Computing {intermediate_modality} -> {primary_modality2} similarity matrix...")
    feat_mid2 = feat_mid @ feat2.T                                        # (Nmid, N2)
    feat_mid2 = feat_mid2 / (feat_mid2.norm(dim=0, keepdim=True) + 1e-8)  # column-normalize

    print(f"Computing {primary_modality1} -> {primary_modality2} similarity matrix...")
    # Chain the two similarity matrices over the shared intermediate axis.
    # BUGFIX: the original computed `feat1_mid @ feat_mid2.T`, which sums
    # over the gallery axis of feat_mid2 instead of the intermediate axis;
    # it only type-checked because Nmid == N2 in this dataset.
    score_matrix = feat1_mid @ feat_mid2                                  # (N1, N2)

    # Ground-truth IDs: item i in modality 1 matches item i in modality 2.
    ids1 = list(range(len(feat1)))
    ids2 = list(range(len(feat2)))

    print("Computing retrieval metrics...")
    return compute_metric_ret(score_matrix, ids1, ids2, direction="forward")

def compute_metric_ret(score_matrix, ids1, ids2, direction="forward"):
    """
    Compute retrieval recall metrics (R@1 / R@5 / R@10).

    Args:
        score_matrix (torch.Tensor): Similarity matrix of shape
            (len(ids1), len(ids2)).
        ids1 (list): IDs of the first modality (rows).
        ids2 (list): IDs of the second modality (columns); the ground-truth
            match for a query ID is the gallery item with the same ID.
        direction (str): "forward" (modality 1 -> 2) ranks along rows;
            anything else ("backward", modality 2 -> 1) ranks along columns.

    Returns:
        dict: {"<direction>_r1": ..., "<direction>_r5": ..., "<direction>_r10": ...}
            as percentages rounded to one decimal.
    """
    assert score_matrix.shape == (len(ids1), len(ids2))

    if direction == "forward":  # modality 1 -> modality 2
        # ranked[i] = gallery indices of row i, best match first.
        ranked = score_matrix.sort(dim=-1, descending=True)[1].tolist()
        query_ids, gallery_ids, prefix = ids1, ids2, "forward"
    else:  # modality 2 -> modality 1
        # Sort each column, then transpose so ranked[j] lists row indices.
        ranked = score_matrix.sort(dim=0, descending=True)[1].permute(1, 0).tolist()
        query_ids, gallery_ids, prefix = ids2, ids1, "backward"

    # First-occurrence position map: O(1) lookups instead of the original
    # repeated list.index(), which made the loop accidentally O(n^2).
    gallery_pos = {}
    for idx, gid in enumerate(gallery_ids):
        gallery_pos.setdefault(gid, idx)

    # Rank of the ground-truth item for each query (0 = best).
    ranks = [row.index(gallery_pos[qid]) for row, qid in zip(ranked, query_ids)]
    rank = torch.tensor(ranks).to(score_matrix.device)

    n_queries = len(query_ids)
    return {
        f"{prefix}_r{k}": round((rank < k).sum().item() / n_queries * 100, 1)
        for k in (1, 5, 10)
    }

# Example usage: audio -> text retrieval bridged through the image modality.
if __name__ == "__main__":
    model = OmniBind_Base(pretrained=True).cuda().eval()

    # Annotation JSON (one entry per video).
    json_path = "/data/jzw/valor-32k-annotations/desc_val_filtered.json"
    # Root folder holding per-video frame directories (image modality).
    base_folder = "/data/jzw/processed_valor32k_val/frames"
    # Folder holding <video_id>.wav files (audio modality).
    audio_folder = "/data/jzw/processed_valor32k_val/audio"

    metrics = evaluate_with_intermediate_modality(
        model=model,
        primary_modality1="audio",
        primary_modality2="text",
        intermediate_modality="image",
        json_path=json_path,
        base_folder=base_folder,
        audio_folder=audio_folder,
        batch_size=32
    )

    print("\nEnhanced Retrieval Metrics:")
    for name, score in metrics.items():
        print(f"{name}: {score}")