import torch
from omni_model.omni_space import OmniBind_Base
import os
import json
import random
from tqdm import tqdm
import pickle  # Add this import for saving/loading features
import torch.nn as nn  # Add this import for nn.Linear

def create_batches_from_json(json_path, base_folder, batch_size=32, modality="image", audio_folder=None):
    """
    Read samples described by a JSON file and build batches of file paths
    (image/audio) or raw texts for the requested modality.

    Args:
        json_path (str): Path to the JSON file; each item describes one sample
            and carries at least a "video_id" (image/audio) or "desc" (text) key.
        base_folder (str): Root folder containing one frame sub-folder per
            video (used for the "image" modality).
        batch_size (int): Number of items per batch.
        modality (str): One of "image", "audio" or "text".
        audio_folder (str): Folder containing ``<video_id>.wav`` files
            (required for the "audio" modality).

    Returns:
        list: List of batches; each batch is a list of paths or texts.
            The last batch may be smaller than ``batch_size``.

    Raises:
        ValueError: If *modality* is unsupported, or *audio_folder* is
            missing for the "audio" modality.
    """
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    all_data = []

    if modality == "image":
        for item in tqdm(data, desc="Processing image folders"):
            # Each item's "video_id" names its frame folder under base_folder.
            folder_name = item["video_id"]
            folder_path = os.path.join(base_folder, folder_name)

            if not os.path.exists(folder_path):
                print(f"Warning: Folder {folder_path} does not exist.")
                continue

            images = [os.path.join(folder_path, img)
                      for img in os.listdir(folder_path)
                      if img.endswith(('.jpg', '.png'))]

            # Bug fix: random.choice raises IndexError on an empty list, so
            # skip folders that contain no usable frames.
            if not images:
                print(f"Warning: No images found in {folder_path}.")
                continue

            # Sample one frame at random to represent the whole video.
            all_data.append(random.choice(images))

    elif modality == "audio":
        # Validate once up front instead of re-checking on every item.
        if audio_folder is None:
            raise ValueError("audio_folder must be specified for audio modality.")

        for item in tqdm(data, desc="Processing audio files"):
            # The audio track is stored as <video_id>.wav inside audio_folder.
            key = item["video_id"]
            audio_path = os.path.join(audio_folder, f"{key}.wav")

            if not os.path.exists(audio_path):
                print(f"Warning: Audio file {audio_path} does not exist.")
                continue

            all_data.append(audio_path)

    elif modality == "text":
        for item in tqdm(data, desc="Processing text data"):
            # The caption/description lives under the "desc" key.
            all_data.append(item["desc"])

    else:
        raise ValueError("Unsupported modality. Choose from 'image', 'audio', or 'text'.")

    # Split into fixed-size batches (last one may be short).
    return [all_data[i:i + batch_size] for i in range(0, len(all_data), batch_size)]

def save_features_to_disk(features, filepath):
    """Serialize *features* to *filepath* with pickle."""
    with open(filepath, 'wb') as out_file:
        pickle.dump(features, out_file)

def load_features_from_disk(filepath):
    """Deserialize and return the pickled object stored at *filepath*."""
    with open(filepath, 'rb') as in_file:
        return pickle.load(in_file)

def evaluate_with_intermediate_modality(
    model: OmniBind_Base,
    primary_modality1: str,
    primary_modality2: str,
    intermediate_modality: str,
    json_path: str,
    base_folder: str,
    audio_folder: str,
    batch_size: int = 32,
    method: str = "original",
    load_feat_mid_from_file: bool = False,
    use_model_transform: bool = False
) -> dict:
    """
    Evaluate cross-modal retrieval boosted by an intermediate modality.

    The two primary modalities are compared through their precomputed
    similarity matrices against the intermediate modality (``feat1_mid`` /
    ``feat2_mid``), which are always loaded from disk here.

    Args:
        model: Embedding model — unused here, kept for interface compatibility.
        primary_modality1 / primary_modality2 / intermediate_modality:
            Modality names — unused here, kept for interface compatibility.
        json_path / base_folder / audio_folder / batch_size:
            Dataset arguments — unused here, kept for interface compatibility.
        method: Similarity-combination strategy; must start with
            "distribution". Include "softmax" to softmax-normalize each row,
            and "matrix-multiplication" to multiply the matrices directly
            (otherwise rows are L2-normalized first, i.e. cosine similarity).
        load_feat_mid_from_file: Unused; features are always loaded from disk.
        use_model_transform: Unused here.

    Returns:
        dict: Forward retrieval metrics from ``compute_metric_ret``.

    Raises:
        ValueError: If the feature files are missing or *method* is unsupported.
    """
    # Precomputed similarity matrices of each primary modality vs. the
    # intermediate modality. NOTE(review): despite the .pkl extension these
    # are read with torch.load, not pickle — confirm how they were written
    # (save_features_to_disk in this file uses pickle.dump).
    feat1_mid_path = 'feat1_mid.pkl'
    feat2_mid_path = 'feat2_mid.pkl'

    if not (os.path.exists(feat1_mid_path) and os.path.exists(feat2_mid_path)):
        raise ValueError("Feature files feat1_mid.pkl and/or feat2_mid.pkl are missing. Ensure they are available on disk.")

    print("Loading feat1_mid and feat2_mid from disk...")
    feat1_mid = torch.load(feat1_mid_path)
    feat2_mid = torch.load(feat2_mid_path)

    if method.startswith("distribution"):
        print("Normalizing similarity distributions...")
        if "softmax" in method:
            # Turn each similarity row into a probability distribution.
            feat1_mid_norm = torch.softmax(feat1_mid, dim=1)
            feat2_mid_norm = torch.softmax(feat2_mid, dim=1)
        else:
            # Bug fix: feat*_mid_norm was previously undefined when "softmax"
            # was absent from the method string, raising NameError below.
            # Without softmax, use the raw similarity rows.
            feat1_mid_norm = feat1_mid
            feat2_mid_norm = feat2_mid

        # Drop the original names so only the (possibly aliased) normalized
        # tensors stay referenced before the big matmul.
        del feat1_mid
        del feat2_mid
        torch.cuda.empty_cache()

        print(f"Computing final similarity matrix using {method}...")
        if "matrix-multiplication" in method:
            # Multiply the two (optionally softmaxed) matrices directly.
            score_matrix = torch.matmul(feat1_mid_norm, feat2_mid_norm.T)
        else:
            # Cosine similarity between rows: L2-normalize, then matmul.
            feat1_mid_norm = feat1_mid_norm / feat1_mid_norm.norm(dim=1, keepdim=True)
            feat2_mid_norm = feat2_mid_norm / feat2_mid_norm.norm(dim=1, keepdim=True)
            score_matrix = torch.matmul(feat1_mid_norm, feat2_mid_norm.T)
    else:
        # Bug fix: the old message advertised 'original', which this function
        # never actually implemented.
        raise ValueError("Unsupported method. Choose 'distribution-{metric}-{normalization}'.")

    # Samples are aligned by position, so positional indices serve as IDs.
    ids1 = list(range(len(feat1_mid_norm)))
    ids2 = list(range(len(feat2_mid_norm)))

    print("Computing retrieval metrics...")
    metrics = compute_metric_ret(score_matrix, ids1, ids2, direction="forward")
    return metrics

def compute_metric_ret(score_matrix, ids1, ids2, direction="backward"):
    """
    Compute recall-at-K retrieval metrics from a similarity matrix.

    Args:
        score_matrix (torch.Tensor): Similarity matrix of shape
            (len(ids1), len(ids2)).
        ids1 (list): IDs of the first modality (rows).
        ids2 (list): IDs of the second modality (columns).
        direction (str): "forward" (modality 1 -> 2) or "backward"
            (modality 2 -> 1).

    Returns:
        dict: R@1 / R@5 / R@10 percentages, keyed by direction.
    """
    assert score_matrix.shape == (len(ids1), len(ids2))

    # Rank the gallery for every query, best match first, and pick which
    # side plays "query" vs. "gallery" according to the direction.
    if direction == "forward":
        ranked_indices = score_matrix.sort(dim=-1, descending=True)[1].tolist()
        query_ids, gallery_ids, prefix = ids1, ids2, "forward"
    else:
        ranked_indices = score_matrix.sort(dim=0, descending=True)[1].permute(1, 0).tolist()
        query_ids, gallery_ids, prefix = ids2, ids1, "backward"

    # Position of each query's ground-truth item in its ranked gallery list.
    rank = torch.tensor([
        ranked_indices[i].index(gallery_ids.index(qid))
        for i, qid in enumerate(query_ids)
    ]).to(score_matrix.device)

    total = len(query_ids)
    return {
        f"{prefix}_r{k}": round((rank < k).sum().item() / total * 100, 1)
        for k in (1, 5, 10)
    }

# Example usage
if __name__ == "__main__":
    # model = OmniBind_Base(pretrained=True).cuda().eval()
    json_path = "/data/jzw/valor-32k-annotations/desc_val_filtered.json"

    # Root folder of the extracted video frames (image modality)
    base_folder = "/data/jzw/processed_valor32k_val/frames"

    # Folder of the extracted audio tracks (audio modality)
    audio_folder = "/data/jzw/processed_valor32k_val/audio"
    
    # Primary and intermediate modalities for the retrieval evaluation
    primary_modality1 = "audio"   # first primary modality
    primary_modality2 = "text"    # second primary modality
    intermediate_modality = "image"  # bridging (intermediate) modality
    
    # Run the evaluation
    metrics = evaluate_with_intermediate_modality(
        model=None,
        primary_modality1=primary_modality1,
        primary_modality2=primary_modality2,
        intermediate_modality=intermediate_modality,
        json_path=json_path,
        base_folder=base_folder,
        audio_folder=audio_folder,
        batch_size=32,
        method="distribution-cosine-softmax",  # Specify the method
        load_feat_mid_from_file=True,  # Use same_id_features.pkl
        use_model_transform=True  # Enable model transformation
    )
    
    # Print the results
    print("\nEnhanced Retrieval Metrics:")
    for key, value in metrics.items():
        print(f"{key}: {value}")