# -*- coding: utf-8 -*-
"""
Rerank Only 对比实验
直接对比四种rerank模型的效果，不使用embedding预筛选
"""

import os
import json
import time
from typing import List, Dict, Any, Tuple, Optional
from dataclasses import dataclass
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from concurrent.futures import ThreadPoolExecutor, as_completed
from loguru import logger

from .bge_reranker import BGEReranker
from .evaluation_metrics import RetrievalEvaluator
from ...config import (
    get_rerank_experiment_config, RERANK_EVALUATION_CONFIG, 
    RERANK_VISUALIZATION_CONFIG, DEFAULT_DEVICE, 
    MODELS_CACHE_DIR, RESULTS_CACHE_DIR
)


@dataclass
class RerankOnlyResult:
    """
    Result of running one rerank model on one query in the
    rerank-only experiment.
    """
    model_name: str  # name of the rerank model that produced this result
    query: str  # the query text
    candidates: List[str]  # candidate documents fed to the reranker
    rerank_result: List[Tuple[str, float]]  # (document, score) pairs, best first
    processing_time: float  # wall-clock time of the rerank call, in seconds
    top_scores: List[float]  # scores of the top-ranked candidates (up to the first 5)
    score_variance: float    # variance over all returned scores

    # Evaluation metrics (None / 0.0 until computed by the experiment runner).
    ndcg_scores: Optional[Dict[str, float]] = None  # NDCG@k, keyed 'ndcg@k'
    mrr: float = 0.0  # Mean Reciprocal Rank
    map_scores: Optional[Dict[str, float]] = None  # MAP@k, keyed 'map@k'
    precision_scores: Optional[Dict[str, float]] = None  # Precision@k, keyed 'precision@k'
    recall_scores: Optional[Dict[str, float]] = None  # Recall@k, keyed 'recall@k'
    hit_rate: float = 0.0  # Hit Rate
    

class RerankOnlyComparison:
    """
    Rerank-only comparison experiment.

    Feeds the full candidate list straight into each configured BGE rerank
    model (no embedding pre-filtering) and compares the models on speed,
    score statistics and retrieval-quality metrics.
    """

    def __init__(self, top_k: int = 10, device: Optional[str] = None,
                 enable_parallel: bool = True, **kwargs):
        """
        Initialize the comparison experiment.

        Args:
            top_k: Number of final results each reranker should return.
            device: Compute device; falls back to DEFAULT_DEVICE when None.
            enable_parallel: If True, test the models concurrently in threads.
            **kwargs: Extra options merged over the experiment configuration.
        """
        # Experiment-level configuration from the project config.
        experiment_config = get_rerank_experiment_config('rerank_only')

        # NOTE(review): the signature default (10) is truthy, so the config
        # fallback only applies when the caller explicitly passes 0 or None.
        self.top_k = top_k or experiment_config.get('top_k', 10)
        self.device = device or DEFAULT_DEVICE
        self.enable_parallel = enable_parallel
        self.config = {**experiment_config, **kwargs}

        # Instantiate the enabled rerank models (BGE family only).
        model_configs = self.config.get('models', {
            'bge-reranker-base': {'enabled': True},
            'bge-reranker-large': {'enabled': True},
            'bge-reranker-v2-m3': {'enabled': True}
        })

        self.rerankers = {}
        for model_name, model_config in model_configs.items():
            if model_config.get('enabled', True):
                self.rerankers[model_name] = BGEReranker(model_name, device=self.device)

        # Flat list of RerankOnlyResult objects across all queries/models.
        self.results = []

        # Log the effective value (self.top_k), not the raw constructor
        # argument, which may be 0/None.
        logger.info(f"初始化Rerank Only对比实验，top_k: {self.top_k}")

    def load_all_models(self) -> None:
        """
        Eagerly load every configured rerank model.

        Models that fail to load are logged and skipped here; they remain in
        ``self.rerankers`` and will be retried lazily on first use.
        """
        logger.info("开始预加载所有rerank模型...")

        for model_name, reranker in self.rerankers.items():
            try:
                if not reranker._is_loaded:
                    logger.info(f"正在加载模型: {model_name}")
                    reranker.load_model()
                    logger.info(f"成功加载模型: {model_name}")
                else:
                    logger.info(f"模型 {model_name} 已加载")
            except Exception as e:
                logger.warning(f"跳过模型 {model_name}: {e}")

        # Report only the models that actually loaded (the previous version
        # counted every configured model, including failed ones).
        loaded = [name for name, r in self.rerankers.items() if r._is_loaded]
        logger.info(f"成功加载 {len(loaded)} 个模型: {loaded}")

    def _test_single_model(self, model_name: str, reranker, query: str,
                          candidates: List[str]) -> Optional["RerankOnlyResult"]:
        """
        Run one rerank model on a single query and score its output.

        Args:
            model_name: Name of the model under test.
            reranker: BGEReranker instance for that model.
            query: Query text.
            candidates: Candidate documents to rerank.

        Returns:
            Optional[RerankOnlyResult]: The measured result, or None on failure.
        """
        try:
            # Lazily load the model if the eager preload was skipped or failed.
            if not reranker._is_loaded:
                reranker.load_model()

            # Time only the rerank call itself.
            start_time = time.time()
            rerank_result = reranker.rerank(query, candidates, self.top_k)
            processing_time = time.time() - start_time

            # Basic score statistics.
            scores = [score for _, score in rerank_result]
            top_scores = scores[:min(5, len(scores))]  # top-5 scores
            score_variance = np.var(scores) if scores else 0.0

            # Score the ranking with the shared evaluator.
            evaluator = RetrievalEvaluator()

            # Min-max normalize so metrics are comparable across models.
            normalized_scores = evaluator.normalize_scores(scores, method='minmax')

            # NOTE(review): relevance_labels is never consumed below — kept in
            # case scores_to_relevance_labels has evaluator-internal side
            # effects; confirm and remove if the call is pure.
            relevance_labels = evaluator.scores_to_relevance_labels(normalized_scores)

            # Compute every metric at the standard cutoffs.
            k_values = [1, 3, 5, 10]
            metrics = evaluator.evaluate_query(normalized_scores, k_values)

            # Unpack per-k metric families.
            ndcg_scores = {f'ndcg@{k}': metrics[f'ndcg@{k}'] for k in k_values}
            map_scores = {f'map@{k}': metrics[f'map@{k}'] for k in k_values}
            precision_scores = {f'precision@{k}': metrics[f'precision@{k}'] for k in k_values}
            recall_scores = {f'recall@{k}': metrics[f'recall@{k}'] for k in k_values}
            mrr = metrics['mrr']
            hit_rate = metrics['hit_rate']

            result = RerankOnlyResult(
                model_name=model_name,
                query=query,
                candidates=candidates,
                rerank_result=rerank_result,
                processing_time=processing_time,
                top_scores=top_scores,
                score_variance=score_variance,
                ndcg_scores=ndcg_scores,
                mrr=mrr,
                map_scores=map_scores,
                precision_scores=precision_scores,
                recall_scores=recall_scores,
                hit_rate=hit_rate
            )

            logger.info(f"{model_name} 完成，耗时: {processing_time:.3f}秒")
            return result

        except Exception as e:
            logger.error(f"模型 {model_name} 测试失败: {e}")
            return None

    def run_single_comparison(self, query: str, candidates: List[str]) -> Dict[str, "RerankOnlyResult"]:
        """
        Run the comparison for one query across all configured models.

        Args:
            query: Query text.
            candidates: Candidate documents.

        Returns:
            Dict[str, RerankOnlyResult]: Per-model results (failed models are
            simply absent from the dict).
        """
        logger.info(f"开始Rerank Only对比实验，查询: {query[:50]}..., 候选数量: {len(candidates)}")

        results = {}

        if self.enable_parallel:
            # Fan out one thread per model; rerank calls release the GIL in
            # native code, so threads overlap usefully.
            with ThreadPoolExecutor(max_workers=len(self.rerankers)) as executor:
                future_to_model = {
                    executor.submit(self._test_single_model, model_name, reranker, query, candidates): model_name
                    for model_name, reranker in self.rerankers.items()
                }

                for future in as_completed(future_to_model):
                    model_name = future_to_model[future]
                    try:
                        result = future.result()
                        if result is not None:
                            results[model_name] = result
                    except Exception as e:
                        logger.error(f"并行处理模型 {model_name} 失败: {e}")
        else:
            # Sequential fallback.
            for model_name, reranker in self.rerankers.items():
                result = self._test_single_model(model_name, reranker, query, candidates)
                if result is not None:
                    results[model_name] = result

        return results

    def run_batch_comparison(self, queries: List[str], candidates_list: List[List[str]]) -> List[Dict[str, "RerankOnlyResult"]]:
        """
        Run the comparison for a batch of queries.

        Args:
            queries: Query texts.
            candidates_list: One candidate list per query.

        Returns:
            List[Dict[str, RerankOnlyResult]]: Per-query result dicts.

        Raises:
            ValueError: If queries and candidates_list differ in length.
        """
        if len(queries) != len(candidates_list):
            raise ValueError("查询数量与候选列表数量不匹配")

        all_results = []

        for i, (query, candidates) in enumerate(zip(queries, candidates_list)):
            logger.info(f"处理查询 {i+1}/{len(queries)}")
            try:
                results = self.run_single_comparison(query, candidates)
                all_results.append(results)
                # Also accumulate into the flat result list used by
                # analyze_results / save_results / visualize_results.
                self.results.extend(results.values())
            except Exception as e:
                logger.error(f"查询 {i+1} 处理失败: {e}")
                continue

        return all_results

    def run_uniform_candidates_comparison(self, queries: List[str], candidates: List[str]) -> List[Dict[str, "RerankOnlyResult"]]:
        """
        Batch comparison where every query shares one candidate set.

        Args:
            queries: Query texts.
            candidates: The shared candidate document list.

        Returns:
            List[Dict[str, RerankOnlyResult]]: Per-query result dicts.
        """
        candidates_list = [candidates] * len(queries)
        return self.run_batch_comparison(queries, candidates_list)

    def analyze_results(self) -> Dict[str, Any]:
        """
        Aggregate the accumulated results into a per-model analysis report.

        Returns:
            Dict[str, Any]: Report with 'model_performance',
            'speed_comparison', 'score_analysis', 'evaluation_metrics' and
            'ranking_consistency' sections, or {'error': ...} when empty.
        """
        if not self.results:
            return {"error": "没有可分析的结果"}

        # Group results by model.
        model_results = {}
        for result in self.results:
            model_results.setdefault(result.model_name, []).append(result)

        analysis = {
            'model_performance': {},
            'speed_comparison': {},
            'score_analysis': {},
            'evaluation_metrics': {},
            'ranking_consistency': {}
        }

        # Standard cutoffs, shared by the collection and the statistics below.
        k_values = [1, 3, 5, 10]

        for model_name, results in model_results.items():
            processing_times = [r.processing_time for r in results]
            score_variances = [r.score_variance for r in results]

            # Pool every top score across queries.
            all_top_scores = []
            for r in results:
                all_top_scores.extend(r.top_scores)

            # Collect per-k metric samples.
            ndcg_scores = {f'ndcg@{k}': [] for k in k_values}
            map_scores = {f'map@{k}': [] for k in k_values}
            precision_scores = {f'precision@{k}': [] for k in k_values}
            recall_scores = {f'recall@{k}': [] for k in k_values}
            mrr_scores = []
            hit_rates = []

            for r in results:
                if r.ndcg_scores:
                    for k, score in r.ndcg_scores.items():
                        if k in ndcg_scores:
                            ndcg_scores[k].append(score)

                if r.map_scores:
                    for k, score in r.map_scores.items():
                        if k in map_scores:
                            map_scores[k].append(score)

                if r.precision_scores:
                    for k, score in r.precision_scores.items():
                        if k in precision_scores:
                            precision_scores[k].append(score)

                if r.recall_scores:
                    for k, score in r.recall_scores.items():
                        if k in recall_scores:
                            recall_scores[k].append(score)

                mrr_scores.append(r.mrr)
                hit_rates.append(r.hit_rate)

            analysis['model_performance'][model_name] = {
                'total_queries': len(results),
                'avg_processing_time': np.mean(processing_times),
                'std_processing_time': np.std(processing_times),
                'min_processing_time': np.min(processing_times),
                'max_processing_time': np.max(processing_times)
            }

            analysis['score_analysis'][model_name] = {
                'avg_score_variance': np.mean(score_variances),
                'avg_top_score': np.mean(all_top_scores) if all_top_scores else 0,
                'std_top_score': np.std(all_top_scores) if all_top_scores else 0,
                'score_range': {
                    'min': np.min(all_top_scores) if all_top_scores else 0,
                    'max': np.max(all_top_scores) if all_top_scores else 0
                }
            }

            def _calc_stats(values):
                """Summarize a sample as JSON-safe floats (zeros when empty)."""
                if not values:
                    return {'mean': 0.0, 'std': 0.0, 'min': 0.0, 'max': 0.0, 'count': 0}
                return {
                    'mean': float(np.mean(values)),
                    'std': float(np.std(values)),
                    'min': float(np.min(values)),
                    'max': float(np.max(values)),
                    'count': len(values)
                }

            evaluation_metrics = {
                'MRR': _calc_stats(mrr_scores),
                'Hit Rate': _calc_stats(hit_rates)
            }

            for k in k_values:
                evaluation_metrics[f'NDCG@{k}'] = _calc_stats(ndcg_scores.get(f'ndcg@{k}', []))
                evaluation_metrics[f'MAP@{k}'] = _calc_stats(map_scores.get(f'map@{k}', []))
                evaluation_metrics[f'Precision@{k}'] = _calc_stats(precision_scores.get(f'precision@{k}', []))
                evaluation_metrics[f'Recall@{k}'] = _calc_stats(recall_scores.get(f'recall@{k}', []))

            analysis['evaluation_metrics'][model_name] = evaluation_metrics

        # Cross-model ranking consistency only makes sense with 2+ models.
        if len(model_results) > 1:
            analysis['ranking_consistency'] = self._analyze_ranking_consistency(model_results)

        return analysis

    def _analyze_ranking_consistency(self, model_results: Dict[str, List["RerankOnlyResult"]]) -> Dict[str, Any]:
        """
        Measure how similarly each pair of models scores the same queries.

        Args:
            model_results: Results grouped by model name.

        Returns:
            Dict[str, Any]: Per-pair correlation statistics, keyed
            '<model1>_vs_<model2>'.
        """
        consistency_analysis = {}

        model_names = list(model_results.keys())

        for i, model1 in enumerate(model_names):
            for model2 in model_names[i+1:]:
                # Index each model's results by query, then intersect.
                results1_dict = {r.query: r for r in model_results[model1]}
                results2_dict = {r.query: r for r in model_results[model2]}
                common_queries = set(results1_dict.keys()) & set(results2_dict.keys())

                if len(common_queries) > 1:
                    # Pearson correlation of the top scores, per shared query.
                    correlations = []
                    for query in common_queries:
                        scores1 = results1_dict[query].top_scores
                        scores2 = results2_dict[query].top_scores

                        if len(scores1) > 1 and len(scores2) > 1:
                            # Truncate to the shorter list so lengths match.
                            min_len = min(len(scores1), len(scores2))
                            corr = np.corrcoef(scores1[:min_len], scores2[:min_len])[0, 1]
                            # Constant score lists yield NaN — skip those.
                            if not np.isnan(corr):
                                correlations.append(corr)

                    if correlations:
                        consistency_analysis[f"{model1}_vs_{model2}"] = {
                            'avg_correlation': np.mean(correlations),
                            'std_correlation': np.std(correlations),
                            'num_comparisons': len(correlations)
                        }

        return consistency_analysis

    def save_results(self, output_dir: Optional[str] = None) -> None:
        """
        Persist the detailed results and the analysis report as JSON.

        Args:
            output_dir: Output directory; defaults to
                RESULTS_CACHE_DIR/rerank_only_comparison.
        """
        if output_dir is None:
            output_dir = os.path.join(RESULTS_CACHE_DIR, 'rerank_only_comparison')
        os.makedirs(output_dir, exist_ok=True)

        # Detailed per-(model, query) records.
        results_data = []
        for result in self.results:
            results_data.append({
                'model_name': result.model_name,
                'query': result.query,
                'processing_time': result.processing_time,
                'top_scores': result.top_scores,
                'score_variance': result.score_variance,
                'evaluation_metrics': {
                    'mrr': result.mrr,
                    'hit_rate': result.hit_rate,
                    'ndcg': result.ndcg_scores,
                    'map': result.map_scores,
                    'precision': result.precision_scores,
                    'recall': result.recall_scores
                },
                'final_ranking': result.rerank_result[:5]  # keep only the top 5
            })

        with open(os.path.join(output_dir, 'rerank_only_results.json'), 'w', encoding='utf-8') as f:
            json.dump(results_data, f, ensure_ascii=False, indent=2)

        # Aggregated analysis report.
        analysis = self.analyze_results()
        with open(os.path.join(output_dir, 'rerank_only_analysis.json'), 'w', encoding='utf-8') as f:
            json.dump(analysis, f, ensure_ascii=False, indent=2)

        logger.info(f"实验结果已保存到: {output_dir}")

    def visualize_results(self, output_dir: Optional[str] = None) -> None:
        """
        Render a 2x2 comparison figure (times, variances, scores) to PNG.

        Args:
            output_dir: Output directory; defaults to
                RESULTS_CACHE_DIR/rerank_only_comparison.
        """
        if not self.results:
            logger.warning("没有结果可以可视化")
            return

        if output_dir is None:
            output_dir = os.path.join(RESULTS_CACHE_DIR, 'rerank_only_comparison')
        os.makedirs(output_dir, exist_ok=True)

        # Flatten results into a plotting DataFrame.
        df_data = []
        for result in self.results:
            df_data.append({
                'Model': result.model_name,
                'Processing Time': result.processing_time,
                'Score Variance': result.score_variance,
                'Avg Top Score': np.mean(result.top_scores) if result.top_scores else 0,
                'Max Score': max(result.top_scores) if result.top_scores else 0
            })

        df = pd.DataFrame(df_data)

        # Configure fonts so CJK labels render correctly.
        viz_config = RERANK_VISUALIZATION_CONFIG
        plt.rcParams['font.sans-serif'] = viz_config.get('font_family', ['SimHei', 'Arial Unicode MS', 'DejaVu Sans'])
        plt.rcParams['axes.unicode_minus'] = False

        figsize = viz_config.get('figsize', (15, 12))
        dpi = viz_config.get('dpi', 300)

        fig, axes = plt.subplots(2, 2, figsize=figsize)

        # Processing-time distribution per model.
        sns.boxplot(data=df, x='Model', y='Processing Time', ax=axes[0, 0])
        axes[0, 0].set_title('处理时间对比')
        axes[0, 0].tick_params(axis='x', rotation=45)

        # Score-variance distribution per model.
        sns.boxplot(data=df, x='Model', y='Score Variance', ax=axes[0, 1])
        axes[0, 1].set_title('分数方差对比')
        axes[0, 1].tick_params(axis='x', rotation=45)

        # Average top-score distribution per model.
        sns.boxplot(data=df, x='Model', y='Avg Top Score', ax=axes[1, 0])
        axes[1, 0].set_title('平均Top分数对比')
        axes[1, 0].tick_params(axis='x', rotation=45)

        # Speed/quality trade-off scatter.
        sns.scatterplot(data=df, x='Processing Time', y='Avg Top Score', hue='Model', ax=axes[1, 1])
        axes[1, 1].set_title('处理时间 vs 平均Top分数')

        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'rerank_only_comparison.png'), dpi=dpi, bbox_inches='tight')
        plt.close()

        logger.info(f"可视化结果已保存到: {output_dir}")