"""
CN-CLIP原始模型评估器
"""

import json
import logging
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple

import numpy as np
import torch
from tqdm import tqdm

logger = logging.getLogger(__name__)


class CNClipEvaluator:
    """CN-CLIP model evaluator implementing standard image-text retrieval metrics.

    Extracts image/text features through a model loader, builds the pairwise
    similarity matrix, and reports Recall@K plus rank statistics in both
    retrieval directions (image->text and text->image).
    """

    def __init__(self, model_loader, device="cuda" if torch.cuda.is_available() else "cpu"):
        """
        Args:
            model_loader: Object exposing ``model``, ``encode_image``,
                ``encode_text`` and ``get_model_info`` (see callers).
            device: Torch device string used for feature extraction.
        """
        self.model_loader = model_loader
        self.device = device
        # Every evaluate_retrieval() result dict is appended here.
        self.results_history = []

    def evaluate_retrieval(self, dataloader, k_values=(1, 5, 10),
                           max_samples=None, save_features=False) -> Dict:
        """
        Evaluate image-text retrieval performance.

        Args:
            dataloader: Iterable yielding ``(images, texts, metadata)`` batches.
            k_values: Top-K cutoffs for Recall@K. Default is a tuple rather
                than a list to avoid the shared-mutable-default pitfall.
            max_samples: Stop feature extraction once at least this many
                samples have been seen (None = whole dataset). The final
                batch is not trimmed, so slightly more samples may be kept.
            save_features: If True, embed the raw feature tensors and
                metadata in the returned dict. Beware: the result (including
                features) is also retained in ``results_history``.

        Returns:
            Dict with model info, dataset info, timing, retrieval metrics
            and similarity-matrix statistics.

        Raises:
            RuntimeError: If the model is not loaded, or the dataset yields
                no non-empty batches.
        """
        logger.info("Starting retrieval evaluation...")

        if self.model_loader.model is None:
            raise RuntimeError("Model not loaded")

        # Feature accumulation (kept on CPU to bound GPU memory use).
        image_features = []
        text_features = []
        metadatas = []

        sample_count = 0
        start_time = time.time()

        with torch.no_grad():
            for images, texts, batch_metadata in tqdm(dataloader, desc="Extracting features"):
                if len(images) == 0:  # skip empty batches
                    continue

                images = images.to(self.device)
                texts = texts.to(self.device)

                # NOTE(review): the similarity below is a plain dot product,
                # so this assumes the encoders return L2-normalized features
                # (cosine similarity) — confirm against the model loader.
                image_features.append(self.model_loader.encode_image(images).cpu())
                text_features.append(self.model_loader.encode_text(texts).cpu())
                metadatas.extend(batch_metadata)

                sample_count += len(images)

                # `is not None` (not truthiness) so an explicit limit of 0
                # is honored instead of being silently ignored.
                if max_samples is not None and sample_count >= max_samples:
                    logger.info(f"Reached max samples limit: {max_samples}")
                    break

        if not image_features:
            raise RuntimeError("No valid samples found in dataset")

        # Merge per-batch features into (N, D) tensors.
        image_features = torch.cat(image_features, dim=0)
        text_features = torch.cat(text_features, dim=0)

        extraction_time = time.time() - start_time
        logger.info(f"Feature extraction completed: {sample_count} samples in {extraction_time:.2f}s")

        # Pairwise similarity: rows index images, columns index texts;
        # the matched (positive) pair for index i sits on the diagonal.
        start_time = time.time()
        similarity_matrix = image_features @ text_features.T
        similarity_time = time.time() - start_time

        evaluation_results = self._compute_retrieval_metrics(
            similarity_matrix, k_values
        )

        results = {
            "model_info": self.model_loader.get_model_info(),
            "dataset_info": {
                "total_samples": sample_count,
                "feature_dim": image_features.shape[1]
            },
            "timing": {
                "feature_extraction_time": extraction_time,
                "similarity_computation_time": similarity_time,
                # sample_count > 0 is guaranteed by the emptiness check above.
                "avg_time_per_sample": extraction_time / sample_count
            },
            "retrieval_metrics": evaluation_results,
            "similarity_stats": self._compute_similarity_stats(similarity_matrix)
        }

        # Optionally keep the raw features (memory-hungry for large sets).
        if save_features:
            results["features"] = {
                "image_features": image_features,
                "text_features": text_features,
                "metadatas": metadatas
            }

        self.results_history.append(results)
        return results

    def _compute_retrieval_metrics(self, similarity_matrix: torch.Tensor,
                                   k_values: Sequence[int]) -> Dict:
        """
        Compute Recall@K and rank statistics for both retrieval directions.

        Ranks are 1-based and computed with a ``>=`` comparison, so scores
        tied with the positive pair count as worse (a conservative, but
        deterministic, tie-breaking rule). Vectorized over the whole matrix
        instead of a Python loop per sample.
        """
        # Positive-pair scores live on the diagonal.
        positives = similarity_matrix.diag()

        # Image-to-text: for image i, rank of text i among all texts (row i).
        i2t_ranks = (similarity_matrix >= positives.unsqueeze(1)).sum(dim=1).tolist()
        # Text-to-image: for text i, rank of image i among all images (column i).
        t2i_ranks = (similarity_matrix >= positives.unsqueeze(0)).sum(dim=0).tolist()

        i2t_recall = {f"R@{k}": float(np.mean([rank <= k for rank in i2t_ranks]))
                      for k in k_values}
        t2i_recall = {f"R@{k}": float(np.mean([rank <= k for rank in t2i_ranks]))
                      for k in k_values}

        return {
            "image_to_text": {
                "recall": i2t_recall,
                "mean_rank": np.mean(i2t_ranks),
                "median_rank": np.median(i2t_ranks)
            },
            "text_to_image": {
                "recall": t2i_recall,
                "mean_rank": np.mean(t2i_ranks),
                "median_rank": np.median(t2i_ranks)
            },
            "average": {
                # Mean of the two directions, per cutoff.
                "recall": {f"R@{k}": (i2t_recall[f"R@{k}"] + t2i_recall[f"R@{k}"]) / 2
                           for k in k_values}
            }
        }

    def _compute_similarity_stats(self, similarity_matrix: torch.Tensor) -> Dict:
        """Summary statistics of the similarity matrix; the diagonal entries
        are the positive-pair (matched image/text) similarities."""
        similarities = similarity_matrix.flatten()
        positives = similarity_matrix.diag()

        return {
            "mean": similarities.mean().item(),
            "std": similarities.std().item(),
            "min": similarities.min().item(),
            "max": similarities.max().item(),
            "median": similarities.median().item(),
            "diagonal_mean": positives.mean().item(),  # positive-pair mean similarity
            "diagonal_std": positives.std().item()
        }

    def generate_report(self, results: Dict, save_path: Optional[str] = None) -> str:
        """Render an evaluation-results dict (as returned by
        ``evaluate_retrieval``) into a human-readable text report.

        Args:
            results: Result dict from ``evaluate_retrieval``.
            save_path: Optional file path; when given, the report is also
                written there as UTF-8.

        Returns:
            The report as a single string.
        """
        report_lines = []
        report_lines.append("=" * 80)
        report_lines.append("CN-CLIP 文物数据集基线评估报告")
        report_lines.append("=" * 80)

        # Model section
        model_info = results["model_info"]
        report_lines.append(f"模型名称: {model_info['model_name']}")
        report_lines.append(f"设备: {model_info['device']}")
        report_lines.append(f"总参数量: {model_info['total_parameters']:,}")
        report_lines.append("")

        # Dataset section
        dataset_info = results["dataset_info"]
        report_lines.append(f"数据集样本数: {dataset_info['total_samples']:,}")
        report_lines.append(f"特征维度: {dataset_info['feature_dim']}")
        report_lines.append("")

        # Retrieval metrics section
        retrieval = results["retrieval_metrics"]
        report_lines.append("检索性能指标:")
        report_lines.append("-" * 40)

        # Image-to-Text
        i2t = retrieval["image_to_text"]
        report_lines.append("Image → Text:")
        for metric, value in i2t["recall"].items():
            report_lines.append(f"  {metric}: {value:.4f}")
        report_lines.append(f"  Mean Rank: {i2t['mean_rank']:.2f}")
        report_lines.append(f"  Median Rank: {i2t['median_rank']:.2f}")
        report_lines.append("")

        # Text-to-Image
        t2i = retrieval["text_to_image"]
        report_lines.append("Text → Image:")
        for metric, value in t2i["recall"].items():
            report_lines.append(f"  {metric}: {value:.4f}")
        report_lines.append(f"  Mean Rank: {t2i['mean_rank']:.2f}")
        report_lines.append(f"  Median Rank: {t2i['median_rank']:.2f}")
        report_lines.append("")

        # Direction-averaged recall
        avg = retrieval["average"]
        report_lines.append("平均性能:")
        for metric, value in avg["recall"].items():
            report_lines.append(f"  {metric}: {value:.4f}")
        report_lines.append("")

        # Similarity-matrix statistics
        sim_stats = results["similarity_stats"]
        report_lines.append("相似度矩阵统计:")
        report_lines.append(f"  均值: {sim_stats['mean']:.4f}")
        report_lines.append(f"  标准差: {sim_stats['std']:.4f}")
        report_lines.append(f"  最大值: {sim_stats['max']:.4f}")
        report_lines.append(f"  最小值: {sim_stats['min']:.4f}")
        report_lines.append(f"  正样本均值: {sim_stats['diagonal_mean']:.4f}")
        report_lines.append("")

        # Timing section
        timing = results["timing"]
        report_lines.append("性能统计:")
        report_lines.append(f"  特征提取耗时: {timing['feature_extraction_time']:.2f}s")
        report_lines.append(f"  相似度计算耗时: {timing['similarity_computation_time']:.2f}s")
        report_lines.append(f"  平均每样本耗时: {timing['avg_time_per_sample']:.4f}s")
        report_lines.append("")

        report_lines.append("=" * 80)

        report = "\n".join(report_lines)

        # Optional write-out (UTF-8 because the report contains Chinese text).
        if save_path:
            with open(save_path, 'w', encoding='utf-8') as f:
                f.write(report)
            logger.info(f"Report saved to {save_path}")

        return report