from typing import Dict, List, Tuple, Optional
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from tqdm import tqdm
import logging
import numpy as np
import random
from src.dataset import MMEBDataset
from src.dataset_vstar import VSTARBenchDataset
from src.modeling_clip import CLIPModel
from src.processing_clip import CLIPProcessor
from PIL import Image


# Module-wide logging: timestamped INFO-level output shared by the dataset
# extraction and evaluation code below.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("Evaluator")


class PositivePairDataset(Dataset):
    """Dataset of image-text positive pairs (keeps the fields needed for
    local-alignment verification).

    Each stored item is a 4-tuple: (query_image, merged_text, bbox, masked_image).
    `bbox` and `masked_image` are None unless local alignment is enabled.
    """

    def __init__(self, base_dataset, enable_local_alignment: bool = False):
        """
        Args:
            base_dataset: iterable of sample dicts with at least `query_image`,
                `query_text` and `pos_text`; `bbox` / `masked_image` are required
                per-sample only when `enable_local_alignment` is True.
            enable_local_alignment: keep bbox/masked-image fields and drop
                samples that lack them.
        """
        self.positive_pairs = []
        self.enable_local_alignment = enable_local_alignment
        self._extract_positive_pairs(base_dataset)
        logger.info(f"共提取{len(self.positive_pairs)}个有效图像-文本正样本对（局部对齐验证: {'开启' if enable_local_alignment else '关闭'}）")

    def _extract_positive_pairs(self, base_dataset):
        """Filter `base_dataset` into valid (image, merged_text, bbox, masked) tuples."""
        for idx, sample in enumerate(base_dataset):
            # Basic validity checks: image present and both text fields exist.
            if "query_image" not in sample or sample["query_image"] is None:
                continue
            if "query_text" not in sample or "pos_text" not in sample:
                continue

            # When local alignment is enabled, a valid bbox AND masked_image
            # are mandatory.
            bbox = None
            masked_image = None
            if self.enable_local_alignment:
                if "bbox" not in sample or sample["bbox"] is None:
                    logger.debug(f"样本{idx}无有效bbox，跳过（局部对齐验证开启）")
                    continue
                bbox = sample["bbox"]
                if len(bbox) != 4 or any(coord < 0 for coord in bbox):
                    logger.debug(f"样本{idx}bbox格式非法（{bbox}），跳过")
                    continue

                # Fix: previously `sample["masked_image"]` was read unchecked and
                # raised KeyError on samples lacking the field; validate first.
                if "masked_image" not in sample or sample["masked_image"] is None:
                    logger.debug(f"Sample {idx} has no valid masked_image, skipped")
                    continue
                masked_image = sample["masked_image"]

            # Build the merged text exactly as done at training time.
            query_text = sample["query_text"].replace("<|image_1|>", "").strip()
            pos_text = sample["pos_text"].strip()
            if not query_text and not pos_text:
                continue
            merged_text = f"Query: {query_text} Answer: {pos_text}"

            # Store the pair (including the local-alignment fields).
            self.positive_pairs.append((
                sample["query_image"],
                merged_text,
                bbox,
                masked_image
            ))

    def __len__(self) -> int:
        """Number of extracted positive pairs."""
        return len(self.positive_pairs)

    def __getitem__(self, idx: int) -> Tuple:
        """Return the (image, merged_text, bbox, masked_image) tuple at `idx`."""
        return self.positive_pairs[idx]


class CrossModalEvaluator:
    """Image-to-text retrieval evaluator with optional local-alignment check.

    For each positive (image, text) pair a distractor set of texts is sampled
    and Recall@{1,5,10} is computed from image-to-text similarities.  When
    local alignment is enabled, the similarity drop caused by masking the
    image region is measured as well.
    """

    def __init__(self, model: "CLIPModel", processor: "CLIPProcessor", data_args,
                 num_negatives: int = 19, enable_local_alignment: bool = False):
        """
        Keeps the local-alignment verification capability and restores the
        legacy Recall computation logic.

        Args:
            model: model exposing `get_image_features` / `get_text_features`.
            processor: paired processor for image preprocessing / tokenization.
            data_args: dataset arguments (stored for interface compatibility).
            num_negatives: number of distractors sampled per positive pair.
            enable_local_alignment: run the masked-image similarity check.
        """
        # `.to()` onto the model's own device is a no-op kept for safety.
        self.model = model.eval().to(next(model.parameters()).device)
        self.processor = processor
        self.data_args = data_args
        self.num_negatives = num_negatives
        self.total_samples_per_group = num_negatives + 1  # 1 positive + N negatives
        self.enable_local_alignment = enable_local_alignment

        # Legacy behavior: fixed recall@1/5/10 metrics.
        self.metrics = {
            "recall@1": self._recall_at_k,
            "recall@5": lambda sim: self._recall_at_k(sim, 5),
            "recall@10": lambda sim: self._recall_at_k(sim, 10)
        }

        self.embed_dim = self._get_embed_dim()

    def _get_embed_dim(self) -> int:
        """Probe the text encoder once to discover the embedding width.

        Falls back to 512 (standard CLIP width) when the probe fails.
        """
        try:
            with torch.no_grad():
                text_feat = self.model.get_text_features(
                    **self.processor(text=["test"], return_tensors="pt").to(self.model.device)
                )
            return text_feat.shape[-1]
        except Exception as e:
            logger.warning(f"自动获取维度失败，使用默认值512: {str(e)}")
            return 512

    def _extract_embeddings(self, pairs: List[Tuple]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Batch-encode the images and texts of `pairs`.

        Returns L2-normalized (image_embeds, text_embeds) on CPU.
        """
        images = [pair[0] for pair in pairs]
        texts = [pair[1] for pair in pairs]

        with torch.no_grad():
            img_inputs = self.processor(
                images=images,
                return_tensors="pt",
                padding=True
            ).to(self.model.device)
            img_embeds = self.model.get_image_features(**img_inputs)
            img_embeds = F.normalize(img_embeds, dim=-1).cpu()

        with torch.no_grad():
            text_inputs = self.processor(
                text=texts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=256
            ).to(self.model.device)
            text_embeds = self.model.get_text_features(**text_inputs)
            text_embeds = F.normalize(text_embeds, dim=-1).cpu()

        return img_embeds, text_embeds

    def _build_distractors(self, all_pairs: List[Tuple], target_idx: int) -> List[Tuple]:
        """Sample `num_negatives` DISTINCT distractor pairs, excluding the target.

        Fix: the previous rejection loop drew indices with replacement, so the
        same negative could appear several times in one distractor set, which
        inflates recall.  Sampling without replacement guarantees distinct
        negatives; `evaluate_dataset` already guarantees enough candidates.
        """
        candidate_indices = [i for i in range(len(all_pairs)) if i != target_idx]
        chosen = random.sample(candidate_indices, self.num_negatives)
        return [all_pairs[i] for i in chosen]

    def _recall_at_k(self, similarity: torch.Tensor, k: int = 1) -> float:
        """Legacy Recall@k: 1.0 iff the positive (index 0) ranks within top-k.

        Accepts either a 1-D similarity vector or a (1, N) row.
        """
        ranks = torch.argsort(similarity, descending=True)

        if ranks.dim() == 1:
            positive_rank = (ranks == 0).nonzero().item() + 1
        else:
            positive_rank = (ranks == 0).nonzero(as_tuple=True)[1].item() + 1

        return 1.0 if positive_rank <= k else 0.0

    def evaluate_dataset(self, pair_dataset: "PositivePairDataset") -> Dict[str, float]:
        """Evaluate every positive pair in `pair_dataset` (core evaluation loop).

        Returns a dict of averaged recall@1/5/10, their overall mean, and —
        when local alignment is on — averaged original/masked similarities and
        their difference.  Returns {} when there are too few valid samples.
        """
        if len(pair_dataset) < (self.num_negatives + 1):
            logger.warning(f"有效样本数（{len(pair_dataset)}）不足（需大于{self.num_negatives}个负样本），无法完成评估")
            return {}

        all_pairs = pair_dataset.positive_pairs
        # Per-sample metric accumulators.
        results = {
            "img2text_recall@1": [],
            "img2text_recall@5": [],
            "img2text_recall@10": []
        }

        # Local-alignment accumulators (only when enabled).
        if self.enable_local_alignment:
            results.update({
                "similarity_original": [],
                "similarity_masked": [],
                "similarity_diff": []
            })

        for target_idx in tqdm(range(len(all_pairs)), desc=f"评估数据集（局部对齐: {'开启' if self.enable_local_alignment else '关闭'}）"):
            # Unpack the sample (image, text, bbox, masked image).
            target_img_original, target_text, target_bbox, target_masked_img = all_pairs[target_idx]

            # 1. Build the confusion set: positive first, then sampled negatives.
            distractors = self._build_distractors(all_pairs, target_idx)
            distraction_set = [(target_img_original, target_text, target_bbox, target_masked_img)] + distractors

            # 2. Embed the set and score image->text recall.
            img_embeds, text_embeds = self._extract_embeddings(distraction_set)
            target_img_embed = img_embeds[0:1]
            similarity = (target_img_embed @ text_embeds.T).squeeze()

            results["img2text_recall@1"].append(self.metrics["recall@1"](similarity))
            results["img2text_recall@5"].append(self.metrics["recall@5"](similarity))
            results["img2text_recall@10"].append(self.metrics["recall@10"](similarity))

            # 3. Local-alignment check: similarity drop when the region is masked.
            if self.enable_local_alignment:
                masked_image_pair = [(target_masked_img, target_text, target_bbox, target_masked_img)]
                img_embeds_masked, _ = self._extract_embeddings(masked_image_pair)
                target_img_embed_masked = img_embeds_masked[0:1]

                sim_original = (target_img_embed @ text_embeds[0:1].T).item()
                sim_masked = (target_img_embed_masked @ text_embeds[0:1].T).item()
                sim_diff = sim_original - sim_masked

                results["similarity_original"].append(sim_original)
                results["similarity_masked"].append(sim_masked)
                results["similarity_diff"].append(sim_diff)

        # Average each accumulator (0.0 when empty).
        for key in results:
            if isinstance(results[key], list) and len(results[key]) > 0:
                results[key] = round(np.mean(results[key]), 4)
            else:
                results[key] = 0.0

        # Overall average recall across the three k values.
        recall_keys = ["img2text_recall@1", "img2text_recall@5", "img2text_recall@10"]
        results["overall_avg_recall"] = round(
            np.mean([results[key] for key in recall_keys]), 4
        )

        # Rename local-alignment keys to their averaged form (if enabled).
        if self.enable_local_alignment:
            results["avg_similarity_original"] = results["similarity_original"]
            results["avg_similarity_masked"] = results["similarity_masked"]
            results["avg_similarity_diff"] = results["similarity_diff"]

            del results["similarity_original"]
            del results["similarity_masked"]
            del results["similarity_diff"]
        return results


def evaluate_model(
    model: CLIPModel, 
    processor: CLIPProcessor, 
    data_args, 
    dataset=None,
    enable_local_alignment: bool = False,
    num_negatives: int = 19
) -> Dict[str, float]:
    """Evaluation entry point.

    When `dataset` is supplied, positive pairs are built directly from it and
    evaluated.  Without a `dataset`, a default dataset is loaded from the data
    directory (backward-compatible behavior); any failure on that path is
    logged and an empty dict is returned.
    """
    evaluator = CrossModalEvaluator(
        model, processor, data_args,
        num_negatives=num_negatives,
        enable_local_alignment=enable_local_alignment
    )

    if dataset is None:
        # Backward-compatible path: load the default dataset from the data dir
        # (the subset logic can be customized as needed).
        logger.info("未传入dataset，使用默认方式加载数据")
        try:
            # Adjust the default loading logic here if requirements change.
            default_dataset = MMEBDataset(
                data_dir=data_args.eval_data_dir,
                subsets=["default"],  # default subset
                split=data_args.dataset_split,
                image_resolution=data_args.image_resolution,
                max_samples=data_args.max_eval_samples
            )
            default_pairs = PositivePairDataset(default_dataset, enable_local_alignment)
            return evaluator.evaluate_dataset(default_pairs)
        except Exception as e:
            logger.error(f"加载默认数据集失败: {str(e)}")
            return {}

    # Preferred path: build positive pairs from the caller-supplied dataset.
    supplied_pairs = PositivePairDataset(
        dataset,
        enable_local_alignment=enable_local_alignment
    )
    return evaluator.evaluate_dataset(supplied_pairs)


if __name__ == '__main__':
    import argparse
    import torch

    # Register CJK-capable fonts so matplotlib output can render Chinese text.
    import matplotlib.pyplot as plt
    plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

    # Command-line interface.
    cli = argparse.ArgumentParser(description="CLIP模型跨模态检索评估（保留局部对齐验证）")
    cli.add_argument("--model_path", type=str, required=True, help="预训练模型路径")
    cli.add_argument("--eval_data_dir", type=str, required=True, help="评估数据目录")
    cli.add_argument("--dataset_split", type=str, default="test",
                     choices=["train", "val", "test"], help="数据集分割")
    cli.add_argument("--image_resolution", type=int, default=224, help="图像分辨率")
    cli.add_argument("--max_eval_samples", type=int, default=None, help="最大评估样本数")
    cli.add_argument("--num_negatives", type=int, default=9,
                     help="每个正样本对应的负样本数量")
    cli.add_argument("--enable_local_alignment", action="store_true",
                     help="是否开启局部对齐验证")

    cli_args = cli.parse_args()

    # Lightweight container exposing exactly the fields evaluate_model reads.
    class DataArgs:
        def __init__(self, ns):
            self.eval_data_dir = ns.eval_data_dir
            self.dataset_split = ns.dataset_split
            self.image_resolution = ns.image_resolution
            self.max_eval_samples = ns.max_eval_samples

    eval_data_args = DataArgs(cli_args)

    # Load model and processor from the given checkpoint.
    logger.info(f"加载模型: {cli_args.model_path}")
    clip_model = CLIPModel.from_pretrained(cli_args.model_path)
    clip_processor = CLIPProcessor.from_pretrained(cli_args.model_path)

    # Move the model to the best available device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    clip_model = clip_model.to(device)
    logger.info(f"使用设备: {device}")

    # Run evaluation (no dataset passed here — callers may supply their own).
    logger.info("开始评估...")
    eval_results = evaluate_model(
        clip_model,
        clip_processor,
        eval_data_args,
        enable_local_alignment=cli_args.enable_local_alignment,
        num_negatives=cli_args.num_negatives
    )

    # Report results.
    logger.info("评估完成！")
    logger.info(f"评估结果: {eval_results}")
    