import torch
from omni_model.omni_space import OmniBind_Base
import os
import json
import random
from tqdm import tqdm
import pickle  # Add this import for saving/loading features
import torch.nn as nn  # Add this import for nn.Linear

def create_batches_from_json(json_path, base_folder, batch_size=32, modality="image", audio_folder=None):
    """
    Read samples described by a JSON annotation file and group them into
    batches of file paths (image/audio) or raw strings (text).

    Args:
        json_path (str): Path to a JSON list; each item carries a "video_id"
            field (used for image/audio) and a "desc" field (used for text).
        base_folder (str): Root folder containing one frame sub-folder per
            video (image modality).
        batch_size (int): Number of samples per batch.
        modality (str): One of "image", "audio", or "text".
        audio_folder (str): Folder containing "<video_id>.wav" files;
            required for the audio modality.

    Returns:
        list: List of batches; each batch is a list of paths or texts.

    Raises:
        ValueError: For an unsupported modality, or when the audio modality
            is requested without an ``audio_folder``.
    """
    # Load the annotation file.
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    all_data = []

    if modality == "image":
        for item in tqdm(data, desc="Processing image folders"):
            folder_name = item["video_id"]
            folder_path = os.path.join(base_folder, folder_name)

            # Skip videos whose frame folder is missing.
            if not os.path.exists(folder_path):
                print(f"Warning: Folder {folder_path} does not exist.")
                continue

            # Collect every extracted frame for this video.
            images = [os.path.join(folder_path, img) for img in os.listdir(folder_path) if img.endswith(('.jpg', '.png'))]

            # Guard: random.choice raises IndexError on an empty sequence,
            # so skip folders that exist but contain no frames.
            if not images:
                print(f"Warning: Folder {folder_path} contains no images.")
                continue

            # One randomly sampled frame represents the video.
            all_data.append(random.choice(images))

    elif modality == "audio":
        # Fail fast instead of raising on the first loop iteration.
        if audio_folder is None:
            raise ValueError("audio_folder must be specified for audio modality.")

        for item in tqdm(data, desc="Processing audio files"):
            key = item["video_id"]
            audio_path = os.path.join(audio_folder, f"{key}.wav")

            # Skip videos whose audio track is missing.
            if not os.path.exists(audio_path):
                print(f"Warning: Audio file {audio_path} does not exist.")
                continue

            all_data.append(audio_path)

    elif modality == "text":
        for item in tqdm(data, desc="Processing text data"):
            all_data.append(item["desc"])

    else:
        raise ValueError("Unsupported modality. Choose from 'image', 'audio', or 'text'.")

    # Split into fixed-size batches; the last batch may be smaller.
    batches = [all_data[i:i + batch_size] for i in range(0, len(all_data), batch_size)]

    return batches

def save_features_to_disk(features, filepath):
    """Pickle *features* and write them to *filepath* in binary mode."""
    with open(filepath, 'wb') as handle:
        pickle.dump(features, handle)

def load_features_from_disk(filepath):
    """Read *filepath* in binary mode and return the unpickled features."""
    with open(filepath, 'rb') as handle:
        return pickle.load(handle)

def _embed_batches(model, modality, batches):
    """Encode every batch with the encoder matching *modality*; return one concatenated tensor."""
    chunks = []
    for batch in batches:
        if modality == "audio":
            chunks.append(model.emb_audios(batch))
        elif modality == "image":
            chunks.append(model.emb_images(batch))
        else:
            # Anything else is treated as text, mirroring the batching code.
            chunks.append(model.emb_texts(batch))
    return torch.cat(chunks, dim=0)


def _get_or_extract_features(model, modality, batches, cache_path):
    """Load cached features from *cache_path*, or extract with *model* and cache them."""
    if os.path.exists(cache_path):
        print(f"Loading {modality} features from disk...")
        return load_features_from_disk(cache_path)
    print(f"Extracting {modality} features...")
    feats = _embed_batches(model, modality, batches)
    save_features_to_disk(feats, cache_path)
    return feats


def _distribution_similarity(method, p, q):
    """
    Similarity between two 1-D distributions *p* and *q*; the metric is picked
    by substring match on *method* (checked in the same order as the original
    implementation). Distances are negated so larger always means more similar.
    """
    if "cosine" in method:
        return torch.cosine_similarity(p, q, dim=0)
    if "kl" in method:
        # KL divergence (asymmetric), negated.
        return -torch.sum(p * torch.log(p / (q + 1e-8) + 1e-8))
    if "js" in method:
        # Jensen-Shannon divergence, negated.
        m = 0.5 * (p + q)
        return -0.5 * (torch.sum(p * torch.log(p / (m + 1e-8) + 1e-8)) +
                       torch.sum(q * torch.log(q / (m + 1e-8) + 1e-8)))
    if "emd" in method:
        # 1-D Earth Mover's Distance via CDF difference, negated.
        cdf_p = torch.cumsum(p, dim=0)
        cdf_q = torch.cumsum(q, dim=0)
        return -torch.sum(torch.abs(cdf_p - cdf_q))
    if "hellinger" in method:
        return -torch.sqrt(torch.sum((torch.sqrt(p) - torch.sqrt(q)) ** 2))
    if "wasserstein" in method:
        # Same as EMD for 1-D distributions.
        cdf_p = torch.cumsum(p, dim=0)
        cdf_q = torch.cumsum(q, dim=0)
        return -torch.sum(torch.abs(cdf_p - cdf_q))
    if "bhattacharyya" in method:
        bc = torch.sum(torch.sqrt(p * q))
        return -torch.log(bc + 1e-8)
    if "tvd" in method:
        # Total Variation Distance, negated.
        return -0.5 * torch.sum(torch.abs(p - q))
    if "chi2" in method:
        return -torch.sum((p - q) ** 2 / (q + 1e-8))
    if "correlation" in method:
        mean_p = p.mean()
        mean_q = q.mean()
        cov = torch.sum((p - mean_p) * (q - mean_q))
        std_p = torch.sqrt(torch.sum((p - mean_p) ** 2))
        std_q = torch.sqrt(torch.sum((q - mean_q) ** 2))
        return cov / (std_p * std_q + 1e-8)
    if "jaccard" in method:
        intersection = torch.sum(torch.min(p, q))
        union = torch.sum(torch.max(p, q))
        return intersection / (union + 1e-8)
    if "dice" in method:
        intersection = torch.sum(torch.min(p, q))
        return 2 * intersection / (torch.sum(p) + torch.sum(q) + 1e-8)
    if "kumar-hassebrook" in method:
        dot = torch.sum(p * q)
        return dot / (torch.sum(p ** 2) + torch.sum(q ** 2) - dot + 1e-8)
    raise ValueError(f"Unsupported distribution metric: {method}")


def evaluate_with_intermediate_modality(
    model: OmniBind_Base,
    primary_modality1: str,
    primary_modality2: str,
    intermediate_modality: str,
    json_path: str,
    base_folder: str,
    audio_folder: str,
    batch_size: int = 32,
    method: str = "original",  # Scoring strategy; see docstring
    load_feat_mid_from_file: bool = False,  # Read feat_mid from the precomputed intersection file
    use_model_transform: bool = False  # Map feat1/feat2 through a trained linear head
) -> dict:
    """
    Evaluate retrieval from *primary_modality1* to *primary_modality2*,
    optionally routed through *intermediate_modality*.

    Features for each modality are cached on disk as "<modality>_features.pkl"
    and extracted with *model* on a cache miss. If *model* is None, every
    required feature file must already exist on disk.

    Args:
        model: Embedding model exposing emb_audios / emb_images / emb_texts,
            or None to run purely from cached features.
        primary_modality1: Query modality ("image", "audio" or "text").
        primary_modality2: Target modality.
        intermediate_modality: Bridging modality.
        json_path: Annotation JSON (list of items with "video_id" / "desc").
        base_folder: Root folder with per-video frame directories.
        audio_folder: Folder with "<video_id>.wav" files.
        batch_size: Batch size for feature extraction.
        method: "original", "batch-eval", "linear-weighted", or
            "distribution-<metric>[-<normalization>]".
        load_feat_mid_from_file: When loading from disk, read intermediate
            features from "intersection_features_top5.pkl" instead of the
            default cache file.
        use_model_transform: Project feat1/feat2 into the intermediate space
            with the trained nn.Linear head stored in "trained_model.pth".

    Returns:
        dict: Forward retrieval metrics (R@1 / R@5 / R@10).

    Raises:
        ValueError: If required feature files are missing or *method* is unknown.
        FileNotFoundError: If use_model_transform is set but "trained_model.pth"
            does not exist.
    """
    # Cache file paths for each modality's features.
    feat1_path = f"{primary_modality1}_features.pkl"
    feat_mid_path = f"{intermediate_modality}_features.pkl"
    feat2_path = f"{primary_modality2}_features.pkl"
    same_id_feat_mid_path = "intersection_features_top5.pkl"

    if model is None:
        # No encoder available: everything must come from the on-disk caches.
        print("Model is None. Loading all features from disk...")
        if not (os.path.exists(feat1_path) and os.path.exists(feat2_path)):
            raise ValueError("Feature files are missing. Ensure all feature files are available on disk.")
        feat1 = load_features_from_disk(feat1_path)
        feat2 = load_features_from_disk(feat2_path)

        # Pick the feat_mid source according to the flag.
        mid_source = same_id_feat_mid_path if load_feat_mid_from_file else feat_mid_path
        if not os.path.exists(mid_source):
            raise ValueError(f"{mid_source} is missing. Ensure the file is available on disk.")
        print(f"Loading intermediate modality features from {mid_source}...")
        feat_mid = load_features_from_disk(mid_source)
    else:
        # Build batches for all three modalities from the same annotation file.
        print(f"Creating batches for {primary_modality1} & {intermediate_modality}...")
        batches1 = create_batches_from_json(json_path, base_folder, batch_size, primary_modality1, audio_folder)
        batches_mid = create_batches_from_json(json_path, base_folder, batch_size, intermediate_modality, audio_folder)

        print(f"Creating batches for {intermediate_modality} & {primary_modality2}...")
        batches2 = create_batches_from_json(json_path, base_folder, batch_size, primary_modality2, audio_folder)

        # Extract (or load cached) features per modality.
        feat1 = _get_or_extract_features(model, primary_modality1, batches1, feat1_path)
        feat_mid = _get_or_extract_features(model, intermediate_modality, batches_mid, feat_mid_path)
        feat2 = _get_or_extract_features(model, primary_modality2, batches2, feat2_path)

    if use_model_transform:
        # Project feat1/feat2 into the intermediate embedding space with a
        # trained linear head (kept in a separate name so it does not shadow
        # the embedding model).
        if not os.path.exists('trained_model.pth'):
            raise FileNotFoundError("Trained model parameters not found. Ensure 'trained_model.pth' exists.")

        print("Loading trained model parameters...")
        projector = nn.Linear(1536, 768)
        state_dict = torch.load('trained_model.pth')
        # DataParallel checkpoints prefix every key with 'module.'; strip it so keys match.
        state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
        projector.load_state_dict(state_dict)
        projector = projector.to('cuda')
        projector.eval()

        print("Transforming feat1 and feat2 into feat1_mid and feat2_mid using the model...")
        # NOTE(review): assumes feat1/feat2 are CUDA tensors of width 1536 — confirm upstream.
        with torch.no_grad():  # inference only; no autograd graph needed
            feat1_mid = projector(feat1)
            feat2_mid = projector(feat2)
        torch.save(feat1_mid, 'feat1_mid.pkl')
        torch.save(feat2_mid, 'feat2_mid.pkl')
    else:
        # Similarity of each primary feature against every intermediate feature.
        print(f"Computing {primary_modality1} -> {intermediate_modality} similarity matrix...")
        feat1_mid = feat1 @ feat_mid.T

        print(f"Computing {primary_modality2} -> {intermediate_modality} similarity matrix...")
        feat2_mid = feat2 @ feat_mid.T

    if method == "original":
        # Original method: route each query through its top-k intermediate
        # neighbours and keep the single strongest (softmax-normalized) target hit.
        print("Computing final similarity matrix with weighted top-k intermediate modality features...")
        top_k = 10
        score_matrix = torch.zeros(feat1.size(0), feat2.size(0), device=feat1.device)

        for i in range(feat1_mid.size(0)):
            # Top-k intermediate-modality neighbours for this query.
            top_k_indices_mid = torch.topk(feat1_mid[i], top_k, dim=0).indices

            max_similarity = float('-inf')
            best_target_index = -1

            for mid_idx in top_k_indices_mid:
                mid_feature = feat_mid[mid_idx]

                # Similarity of this intermediate feature to every target.
                mid_feat2_sim = mid_feature @ feat2.T
                top_k_similarities, top_k_indices = torch.topk(mid_feat2_sim, top_k, dim=0)

                # Softmax so scores are comparable across neighbours.
                top_k_similarities = torch.softmax(top_k_similarities, dim=0)

                for j, sim in enumerate(top_k_similarities):
                    if sim.item() > max_similarity:
                        max_similarity = sim.item()
                        best_target_index = top_k_indices[j].item()

            # Record only the single best target hit for this query.
            if best_target_index != -1:
                score_matrix[i, best_target_index] = max_similarity

    elif method.startswith("distribution"):
        # Distribution methods: compare each query/target pair by the shape of
        # their similarity distributions over the intermediate modality.
        print("Normalizing similarity distributions...")
        if "softmax" in method:
            feat1_mid_norm = torch.softmax(feat1_mid, dim=1)
            feat2_mid_norm = torch.softmax(feat2_mid, dim=1)
        elif "l1" in method:
            feat1_mid_norm = feat1_mid / (feat1_mid.sum(dim=1, keepdim=True) + 1e-8)
            feat2_mid_norm = feat2_mid / (feat2_mid.sum(dim=1, keepdim=True) + 1e-8)
        elif "l2" in method:
            feat1_mid_norm = feat1_mid / (feat1_mid.norm(p=2, dim=1, keepdim=True) + 1e-8)
            feat2_mid_norm = feat2_mid / (feat2_mid.norm(p=2, dim=1, keepdim=True) + 1e-8)
        else:
            feat1_mid_norm = feat1_mid
            feat2_mid_norm = feat2_mid

        print(f"Computing final similarity matrix using {method}...")
        if "matrix-multiplication" in method:
            # Directly multiply the two (normalized) similarity matrices.
            score_matrix = torch.matmul(feat1_mid_norm, feat2_mid_norm.T)
        else:
            score_matrix = torch.zeros(feat1.size(0), feat2.size(0), device=feat1.device)
            for i in range(feat1.size(0)):
                for j in range(feat2.size(0)):
                    score_matrix[i, j] = _distribution_similarity(method, feat1_mid_norm[i], feat2_mid_norm[j])

    elif method == "batch-eval":
        # Direct retrieval: raw similarity between the two primary modalities.
        print("Using batch evaluation method...")
        score_matrix = torch.matmul(feat1, feat2.T)
    elif method == "linear-weighted":
        # Blend the top-k intermediate features with linearly normalized
        # weights, then retrieve targets with the blended feature.
        print("Using linear-weighted method...")
        top_k = 10
        score_matrix = torch.zeros(feat1.size(0), feat2.size(0), device=feat1.device)

        # BUG FIX: the original iterated `for i in feat1_mid.size(0)` (an int),
        # which raises TypeError; it must be range(...).
        for i in range(feat1_mid.size(0)):
            top_k_result = torch.topk(feat1_mid[i], top_k, dim=0)
            top_k_indices = top_k_result.indices
            top_k_similarities = top_k_result.values

            # Linearly normalized similarities act as blending weights.
            weights = top_k_similarities / (top_k_similarities.sum() + 1e-8)

            weighted_feat_mid = (weights.unsqueeze(1) * feat_mid[top_k_indices]).sum(dim=0)
            score_matrix[i] = weighted_feat_mid @ feat2.T
    else:
        raise ValueError(
            "Unsupported method. Choose 'original', 'batch-eval', 'linear-weighted', "
            "or 'distribution-{metric}-{normalization}'."
        )

    # IDs are positional: row i of feat1 corresponds to row i of feat2.
    ids1 = list(range(len(feat1)))
    ids2 = list(range(len(feat2)))

    print("Computing retrieval metrics...")
    metrics = compute_metric_ret(score_matrix, ids1, ids2, direction="forward")
    return metrics

def compute_metric_ret(score_matrix, ids1, ids2, direction="forward"):
    """
    Compute recall-at-k retrieval metrics from a similarity matrix.

    Args:
        score_matrix (torch.Tensor): Similarity scores, shape (len(ids1), len(ids2)).
        ids1 (list): IDs of the first modality's samples (row order).
        ids2 (list): IDs of the second modality's samples (column order).
        direction (str): "forward" ranks modality-2 items per modality-1 query;
            anything else ranks modality-1 items per modality-2 query.

    Returns:
        dict: R@1 / R@5 / R@10 as percentages rounded to one decimal place,
        keyed by "forward_*" or "backward_*".
    """
    assert score_matrix.shape == (len(ids1), len(ids2))

    if direction == "forward":  # modality 1 -> modality 2
        # Columns sorted by descending score, one ranking list per query row.
        ranked = score_matrix.sort(dim=-1, descending=True)[1].tolist()
        # Position of each query's ground-truth column in its ranking.
        ranks = [ranked[row].index(ids2.index(qid)) for row, qid in enumerate(ids1)]
        prefix, total = "forward", len(ids1)
    else:  # modality 2 -> modality 1
        # Rows sorted per column, transposed so each id2 gets a ranking list.
        ranked = score_matrix.sort(dim=0, descending=True)[1].permute(1, 0).tolist()
        ranks = [ranked[row].index(ids1.index(qid)) for row, qid in enumerate(ids2)]
        prefix, total = "backward", len(ids2)

    rank_tensor = torch.tensor(ranks).to(score_matrix.device)
    return {
        f"{prefix}_r{k}": round((rank_tensor < k).sum().item() / total * 100, 1)
        for k in (1, 5, 10)
    }

# Example usage
if __name__ == "__main__":
    # model = OmniBind_Base(pretrained=True).cuda().eval()

    # Annotation file for the validation split.
    json_path = "/data/jzw/valor-32k-annotations/desc_val_filtered.json"
    # Root folder holding the extracted video frames (image modality).
    base_folder = "/data/jzw/processed_valor32k_val/frames"
    # Folder holding the extracted audio tracks (audio modality).
    audio_folder = "/data/jzw/processed_valor32k_val/audio"

    # Retrieval setup: audio -> text, bridged through images.
    primary_modality1 = "audio"
    primary_modality2 = "text"
    intermediate_modality = "image"

    # Run the evaluation from cached features (model=None).
    metrics = evaluate_with_intermediate_modality(
        model=None,
        primary_modality1=primary_modality1,
        primary_modality2=primary_modality2,
        intermediate_modality=intermediate_modality,
        json_path=json_path,
        base_folder=base_folder,
        audio_folder=audio_folder,
        batch_size=32,
        method="distribution-cosine-softmax",
        load_feat_mid_from_file=True,  # use intersection_features_top5.pkl
        use_model_transform=True,  # project through the trained linear head
    )

    # Report the metrics.
    print("\nEnhanced Retrieval Metrics:")
    for key, value in metrics.items():
        print(f"{key}: {value}")