# -*- coding: utf-8 -*-
"""
全局推荐器对比工具

本模块提供对所有推荐器的综合性能对比功能，包括BERT模型、相似度推荐器和聚类推荐器。

支持的推荐器:
- BERT模块: BertSimilarityRetriever, BertChineseRetriever
- 相似度模块: SentenceTransformerRetriever (多种相似度方法)
- 聚类模块: DBSCANClusteringRetriever, HierarchicalClusteringRetriever

"""

import sys
import os

# Force the HuggingFace mirror environment variables BEFORE any ML library is
# imported — huggingface_hub reads HF_ENDPOINT at import time, so setting these
# later would have no effect.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
os.environ["HUGGINGFACE_CO_URL_HOME"] = "https://hf-mirror.com"
os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.expanduser("~/.cache/huggingface")
os.environ["SENTENCE_TRANSFORMERS_HOME"] = os.path.expanduser("~/.cache/sentence_transformers")

# Make the project root importable when this script is run directly.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 导入必要的库
import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import platform
from typing import List, Dict, Any, Tuple
from loguru import logger
import pandas as pd
from sklearn.metrics import silhouette_score

# Import the project config for its side effect of pinning the mirror
# environment variables, then log the effective cache/endpoint settings.
import questionretrieval.config

logger.info("🌐 使用 HuggingFace 国内镜像配置")
logger.debug(f"📁 缓存目录: {questionretrieval.config.HF_HUB_CACHE}")
logger.debug(f"🔗 镜像地址: {questionretrieval.config.HF_MIRROR_ENDPOINT}")

# 设置中文字体支持
def setup_chinese_font():
    """Configure matplotlib so CJK glyphs render correctly on the current OS.

    Selects a per-platform font fallback list and disables the unicode
    minus sign (which otherwise renders as a missing-glyph box with CJK
    fonts). Failures are logged but never raised.
    """
    try:
        system = platform.system()
        if system == 'Windows':
            fallback_fonts = ['SimHei', 'Microsoft YaHei', 'KaiTi', 'FangSong']
        elif system == 'Darwin':  # macOS
            fallback_fonts = ['Arial Unicode MS', 'Heiti TC', 'PingFang SC']
        else:  # Linux and anything else
            fallback_fonts = ['DejaVu Sans', 'WenQuanYi Micro Hei', 'WenQuanYi Zen Hei']

        plt.rcParams['font.sans-serif'] = fallback_fonts
        # Render "-" with an ASCII hyphen; CJK fonts often lack U+2212.
        plt.rcParams['axes.unicode_minus'] = False
        logger.info("✅ 中文字体设置成功")
    except Exception as e:
        logger.warning(f"中文字体设置失败: {e}")

# Apply the font configuration at import time so every later plot benefits.
setup_chinese_font()

# 导入所有检索器
from questionretrieval.retrievers.bert.bert_similarity_retriever import BertSimilarityRetriever
from questionretrieval.retrievers.bert.bert_chinese_retriever import BertChineseRetriever
from questionretrieval.retrievers.similarity.sentence_transformer_retriever import SentenceTransformerRetriever
from questionretrieval.retrievers.clustering.dbscan_clustering_retriever import DBSCANClusteringRetriever
from questionretrieval.retrievers.clustering.hierarchical_clustering_retriever import HierarchicalClusteringRetriever
from questionretrieval.io import load_questions, load_sample_questions, get_test_queries


class GlobalRecommenderComparison:
    """
    Global recommender comparison tool.

    Benchmarks every configured recommender (BERT-based, sentence-similarity
    and clustering-based) across three dimensions — accuracy, speed and
    stability — then produces a tabular report, visualizations and a summary.
    """
    
    def __init__(self):
        """
        Initialize the comparison tool with empty recommender/test-data state.
        """
        self.recommenders = {}          # name -> recommender instance
        self.test_questions = []        # candidate question pool shared by all recommenders
        self.test_queries = []          # queries used for benchmarking
        self.expected_answers = []  # expected answers aligned index-wise with test_queries
        self.recommender_categories = {
            'BERT': [],
            'Similarity': [],
            'Clustering': []
        }
        
    def setup_all_recommenders(self):
        """
        Instantiate every supported recommender.

        Each backend is constructed in its own try/except so a single broken
        backend (e.g. a failed model download) does not prevent the rest of
        the comparison from running; failures are logged and skipped.
        """
        logger.info("初始化所有推荐器...")
        
        # BERT-based recommenders
        try:
            bert_roberta = BertSimilarityRetriever(
                model_name="hfl/chinese-roberta-wwm-ext"
            )
            self.recommenders['BERT_RoBERTa'] = bert_roberta
            self.recommender_categories['BERT'].append('BERT_RoBERTa')
            logger.info("成功初始化 BERT RoBERTa 推荐器")
        except Exception as e:
            logger.error(f"初始化 BERT RoBERTa 推荐器失败: {e}")
        
        try:
            bert_chinese = BertChineseRetriever(
                model_name="bert-base-chinese"
            )
            self.recommenders['BERT_Chinese'] = bert_chinese
            self.recommender_categories['BERT'].append('BERT_Chinese')
            logger.info("成功初始化 BERT Chinese 推荐器")
        except Exception as e:
            logger.error(f"初始化 BERT Chinese 推荐器失败: {e}")
        
        # Sentence-Transformer recommenders — one instance per similarity metric
        similarity_methods = ['cosine', 'pearson', 'euclidean']
        for method in similarity_methods:
            try:
                st_recommender = SentenceTransformerRetriever(
                    model_name='paraphrase-multilingual-MiniLM-L12-v2',
                    similarity_method=method
                )
                name = f'SentenceTransformer_{method}'
                self.recommenders[name] = st_recommender
                self.recommender_categories['Similarity'].append(name)
                logger.info(f"成功初始化 SentenceTransformer ({method})")
            except Exception as e:
                logger.error(f"初始化 SentenceTransformer ({method}) 失败: {e}")
        
        # Clustering-based recommenders
        try:
            dbscan_recommender = DBSCANClusteringRetriever(
                eps=0.3,
                min_samples=2
            )
            self.recommenders['DBSCAN'] = dbscan_recommender
            self.recommender_categories['Clustering'].append('DBSCAN')
            logger.info("成功初始化 DBSCAN 聚类推荐器")
        except Exception as e:
            logger.error(f"初始化 DBSCAN 聚类推荐器失败: {e}")
        
        try:
            hierarchical_recommender = HierarchicalClusteringRetriever(
                levels=[0.8, 0.6, 0.4, 0.2],
                linkage_method='ward'
            )
            self.recommenders['Hierarchical'] = hierarchical_recommender
            self.recommender_categories['Clustering'].append('Hierarchical')
            logger.info("成功初始化层级聚类推荐器")
        except Exception as e:
            logger.error(f"初始化层级聚类推荐器失败: {e}")
    
    def load_test_data(self, questions: List[str], queries: List[str] = None, expected_answers: List[str] = None):
        """
        Load test data and feed the candidate pool to every recommender.
        
        Args:
            questions: candidate question list
            queries: query list; if None, queries are sampled from the
                candidates (seeded, so reproducible) and no expected answers
                are tracked
            expected_answers: expected answers aligned with ``queries``
        """
        self.test_questions = questions
        
        if queries is None:
            # Sample up to 10 candidates as queries; fixed seed keeps runs comparable.
            np.random.seed(42)
            query_indices = np.random.choice(len(questions), min(10, len(questions)), replace=False)
            self.test_queries = [questions[i] for i in query_indices]
            self.expected_answers = []
        else:
            self.test_queries = queries
            self.expected_answers = expected_answers if expected_answers else []
            
        # Hand the candidate pool to each recommender (may trigger embedding/clustering).
        for name, recommender in self.recommenders.items():
            try:
                recommender.load_candidates(questions)
                logger.info(f"为 {name} 加载了 {len(questions)} 个候选问题")
            except Exception as e:
                logger.error(f"为 {name} 加载候选问题失败: {e}")
    
    def compare_accuracy(self, top_k: int = 5) -> Dict[str, Any]:
        """
        Compare the accuracy of every recommender over the loaded queries.
        
        Args:
            top_k: number of recommendations requested per query
            
        Returns:
            Dict mapping recommender name to a metrics dict (avg/std/min/max
            similarity, coverage, and — when expected answers are available —
            substring-match accuracy). On failure the value is
            ``{'error': message}``.
        """
        logger.info(f"开始全局准确性对比 (top_k={top_k})...")
        
        results = {}
        
        for name, recommender in self.recommenders.items():
            try:
                similarities = []
                recommendation_counts = []
                accuracy_scores = []  # per-query 1.0/0.0 hit scores vs expected answers
                
                for i, query in enumerate(self.test_queries):
                    # Recommenders expose one of two entry points; skip if neither exists.
                    if hasattr(recommender, 'recommend'):
                        recommendations = recommender.recommend(
                            query_question=query,
                            top_k=top_k
                        )
                    elif hasattr(recommender, 'find_similar_questions'):
                        recommendations = recommender.find_similar_questions(
                            user_query=query,
                            top_k=top_k
                        )
                    else:
                        continue
                    
                    if recommendations:
                        # Normalize the heterogeneous per-recommender return formats.
                        current_similarities = []
                        recommended_questions = []
                        
                        for r in recommendations:
                            if isinstance(r, dict):
                                # BERT recommenders return dicts
                                current_similarities.append(r.get('similarity', 0))
                                recommended_questions.append(r.get('question', ''))
                            elif isinstance(r, (tuple, list)) and len(r) >= 3:
                                # SentenceTransformer-style recommenders return tuples
                                current_similarities.append(r[-1])  # similarity is the last element
                                recommended_questions.append(r[0])  # question is the first element
                            else:
                                # Unknown format: best-effort extraction, defaulting to 0/''
                                try:
                                    current_similarities.append(float(r[-1]) if hasattr(r, '__getitem__') else 0)
                                    recommended_questions.append(str(r[0]) if hasattr(r, '__getitem__') else '')
                                except (IndexError, TypeError, ValueError):
                                    current_similarities.append(0)
                                    recommended_questions.append('')
                        
                        if current_similarities:
                            avg_similarity = np.mean(current_similarities)
                            similarities.append(avg_similarity)
                            recommendation_counts.append(len(recommendations))
                            
                            # Accuracy: did any recommended question contain the expected answer?
                            if i < len(self.expected_answers) and self.expected_answers[i]:
                                expected_answer = self.expected_answers[i]
                                # Substring containment is used as the match criterion.
                                found_expected = any(expected_answer in question for question in recommended_questions)
                                accuracy_scores.append(1.0 if found_expected else 0.0)
                
                if similarities:
                    result_dict = {
                        'avg_similarity': np.mean(similarities),
                        'std_similarity': np.std(similarities),
                        'min_similarity': np.min(similarities),
                        'max_similarity': np.max(similarities),
                        'avg_recommendations': np.mean(recommendation_counts),
                        'recommendation_coverage': len([s for s in similarities if s > 0]) / len(self.test_queries)
                    }
                    
                    # Accuracy metrics only exist when expected answers were supplied.
                    if accuracy_scores:
                        result_dict['accuracy_rate'] = np.mean(accuracy_scores)
                        result_dict['accuracy_count'] = sum(accuracy_scores)
                        result_dict['total_queries'] = len(accuracy_scores)
                    
                    results[name] = result_dict
                else:
                    results[name] = {
                        'avg_similarity': 0,
                        'recommendation_coverage': 0,
                        'accuracy_rate': 0,
                        'error': '无推荐结果'
                    }
                    
                logger.info(f"{name} 平均相似度: {results[name].get('avg_similarity', 0):.3f}, 准确率: {results[name].get('accuracy_rate', 0):.3f}")
                
            except Exception as e:
                logger.error(f"{name} 准确性测试失败: {e}")
                results[name] = {'error': str(e)}
        
        return results
    
    def compare_speed(self, num_queries: int = 10) -> Dict[str, Any]:
        """
        Compare inference speed of every recommender.
        
        Args:
            num_queries: number of queries (prefix of the loaded queries) to time
            
        Returns:
            Dict mapping recommender name to timing stats (initialization,
            avg/std/total recommendation time, combined total), or
            ``{'error': message}`` on failure.
        """
        logger.info(f"开始全局速度对比 (查询数量={num_queries})...")
        
        test_queries = self.test_queries[:num_queries]
        results = {}
        
        for name, recommender in self.recommenders.items():
            try:
                # Initialization time: clustering methods re-cluster on load,
                # so that cost is measured here; others report ~0.
                start_time = time.time()
                if name in self.recommender_categories['Clustering']:
                    recommender.load_candidates(self.test_questions)  # forces re-clustering
                initialization_time = time.time() - start_time
                
                # Per-query recommendation latency
                recommendation_times = []
                for query in test_queries:
                    start_time = time.time()
                    
                    if hasattr(recommender, 'recommend'):
                        _ = recommender.recommend(query_question=query, top_k=5)
                    elif hasattr(recommender, 'find_similar_questions'):
                        _ = recommender.find_similar_questions(user_query=query, top_k=5)
                    
                    end_time = time.time()
                    recommendation_times.append(end_time - start_time)
                
                results[name] = {
                    'initialization_time': initialization_time,
                    'avg_recommendation_time': np.mean(recommendation_times),
                    'std_recommendation_time': np.std(recommendation_times),
                    'total_recommendation_time': np.sum(recommendation_times),
                    'total_time': initialization_time + np.sum(recommendation_times)
                }
                
                logger.info(f"{name} 初始化时间: {initialization_time:.3f}秒, 平均推荐时间: {np.mean(recommendation_times):.3f}秒")
                
            except Exception as e:
                logger.error(f"{name} 速度测试失败: {e}")
                results[name] = {'error': str(e)}
        
        return results
    
    def compare_stability(self, num_runs: int = 3) -> Dict[str, Any]:
        """
        Compare recommender stability (consistency of repeated runs on one query).
        
        Args:
            num_runs: number of repeated runs
            
        Returns:
            Dict mapping recommender name to avg/std similarity across runs and
            a stability score (1 minus the coefficient of variation, clamped to
            0 when the mean is non-positive), or ``{'error': message}``.
        """
        logger.info(f"开始稳定性对比 (运行次数={num_runs})...")
        
        results = {}
        test_query = self.test_queries[0] if self.test_queries else "测试问题"
        
        for name, recommender in self.recommenders.items():
            try:
                all_similarities = []
                
                for run in range(num_runs):
                    if hasattr(recommender, 'recommend'):
                        recommendations = recommender.recommend(
                            query_question=test_query,
                            top_k=5
                        )
                    elif hasattr(recommender, 'find_similar_questions'):
                        recommendations = recommender.find_similar_questions(
                            user_query=test_query,
                            top_k=5
                        )
                    else:
                        continue
                    
                    if recommendations:
                        # Normalize the heterogeneous per-recommender return formats.
                        similarities = []
                        for r in recommendations:
                            if isinstance(r, dict):
                                # BERT recommenders return dicts
                                similarities.append(r.get('similarity', 0))
                            elif isinstance(r, (tuple, list)) and len(r) >= 3:
                                # SentenceTransformer-style recommenders return tuples
                                similarities.append(r[-1])  # similarity is the last element
                            else:
                                # Unknown format: best-effort extraction, defaulting to 0
                                try:
                                    similarities.append(float(r[-1]) if hasattr(r, '__getitem__') else 0)
                                except (IndexError, TypeError, ValueError):
                                    similarities.append(0)
                        
                        if similarities:
                            all_similarities.append(np.mean(similarities))
                
                if all_similarities:
                    results[name] = {
                        'avg_similarity': np.mean(all_similarities),
                        'std_similarity': np.std(all_similarities),
                        'stability_score': 1 - (np.std(all_similarities) / np.mean(all_similarities)) if np.mean(all_similarities) > 0 else 0
                    }
                else:
                    results[name] = {'error': '无推荐结果'}
                    
            except Exception as e:
                logger.error(f"{name} 稳定性测试失败: {e}")
                results[name] = {'error': str(e)}
        
        return results
    
    def generate_performance_report(self, accuracy_results: Dict, speed_results: Dict, 
                                  stability_results: Dict) -> pd.DataFrame:
        """
        Build the per-recommender performance report table.
        
        Args:
            accuracy_results: output of :meth:`compare_accuracy`
            speed_results: output of :meth:`compare_speed`
            stability_results: output of :meth:`compare_stability`
            
        Returns:
            DataFrame with one row per recommender; missing metrics
            (e.g. from failed runs) default to 0.
        """
        report_data = []
        
        for name in self.recommenders.keys():
            # Look up which category this recommender was registered under.
            category = 'Unknown'
            for cat, methods in self.recommender_categories.items():
                if name in methods:
                    category = cat
                    break
            
            row = {
                '推荐器': name,
                '类别': category,
                '平均相似度': accuracy_results.get(name, {}).get('avg_similarity', 0),
                '相似度标准差': accuracy_results.get(name, {}).get('std_similarity', 0),
                '准确率': accuracy_results.get(name, {}).get('accuracy_rate', 0),
                '准确匹配数': accuracy_results.get(name, {}).get('accuracy_count', 0),
                '推荐覆盖率': accuracy_results.get(name, {}).get('recommendation_coverage', 0),
                '平均推荐时间(秒)': speed_results.get(name, {}).get('avg_recommendation_time', 0),
                '总时间(秒)': speed_results.get(name, {}).get('total_time', 0),
                '稳定性分数': stability_results.get(name, {}).get('stability_score', 0)
            }
            report_data.append(row)
        
        df = pd.DataFrame(report_data)
        return df
    
    def visualize_comprehensive_comparison(self, accuracy_results: Dict, speed_results: Dict, 
                                         stability_results: Dict, save_path: str = None):
        """
        Visualize comprehensive comparison results in SCI paper style
        
        Args:
            accuracy_results: Accuracy results
            speed_results: Speed results
            stability_results: Stability results (parameter retained but not used)
            save_path: Path to save the figure
        """
        # Set SCI paper style parameters
        plt.rcParams.update({
            'font.family': 'serif',
            'font.serif': ['Times New Roman', 'DejaVu Serif'],
            'font.size': 10,
            'axes.linewidth': 1.2,
            'axes.spines.top': False,
            'axes.spines.right': False,
            'axes.grid': True,
            'grid.alpha': 0.3,
            'grid.linewidth': 0.8,
            'legend.frameon': True,
            'legend.fancybox': False,
            'legend.edgecolor': 'black',
            'legend.framealpha': 1.0
        })
        
        # Re-apply CJK fonts, since the rcParams update above overrode them.
        setup_chinese_font()
        fig, axes = plt.subplots(2, 2, figsize=(14, 10))
        fig.suptitle('Global Recommender Performance Comparison', fontsize=16, fontweight='bold', y=0.95)
        
        # Define SCI color scheme
        sci_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f']
        
        # Define color mapping for different methods
        method_color_map = {
            'cosine': '#1f77b4',
            'pearson': '#ff7f0e', 
            'euclidean': '#2ca02c',
            'manhattan': '#d62728',
            'linear': '#9467bd',
            'default': '#8c564b'
        }
        
        # 1. Average similarity comparison (show all similarity methods)
        ax1 = axes[0, 0]
        
        # Collect all similarity method data
        similarity_methods = []
        similarity_scores = []
        method_colors = []
        
        for name, result in accuracy_results.items():
            if 'avg_similarity' in result:
                # Extract similarity method name
                if 'SentenceTransformer_' in name:
                    method_name = name.replace('SentenceTransformer_', '')
                else:
                    method_name = name
                
                similarity_methods.append(method_name)
                similarity_scores.append(result['avg_similarity'])
                
                # Assign colors based on method type
                color_assigned = False
                for method_key, color in method_color_map.items():
                    if method_key in name.lower():
                        method_colors.append(color)
                        color_assigned = True
                        break
                if not color_assigned:
                    method_colors.append(method_color_map['default'])
        
        if similarity_methods:
            bars = ax1.bar(similarity_methods, similarity_scores, 
                          color=method_colors, edgecolor='black', linewidth=0.8, alpha=0.8)
            ax1.set_title('(a) Similarity Method Comparison', 
                         fontweight='bold', fontsize=12, pad=15)
            ax1.set_ylabel('Average Similarity', fontsize=11)
            ax1.tick_params(axis='x', rotation=45, labelsize=10)
            ax1.tick_params(axis='y', labelsize=10)
            ax1.set_ylim(0, max(similarity_scores) * 1.15)
            
            # Add value labels
            for bar, val in zip(bars, similarity_scores):
                ax1.text(bar.get_x() + bar.get_width()/2, bar.get_height() + max(similarity_scores) * 0.02,
                        f'{val:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')
            
            # Add legend with unique methods
            legend_elements = []
            unique_methods = set()
            for i, method in enumerate(similarity_methods):
                if method not in unique_methods:
                    legend_elements.append(plt.Rectangle((0,0),1,1, facecolor=method_colors[i], 
                                                        edgecolor='black', linewidth=0.8, alpha=0.8, label=method))
                    unique_methods.add(method)
            ax1.legend(handles=legend_elements, loc='upper right', fontsize=9)
        
        # 2. Recommendation time comparison
        ax2 = axes[0, 1]
        speed_methods = []
        speed_times = []
        speed_colors = []
        
        for name, result in speed_results.items():
            if 'avg_recommendation_time' in result:
                # Extract method name
                if 'SentenceTransformer_' in name:
                    method_name = name.replace('SentenceTransformer_', '')
                else:
                    method_name = name
                
                speed_methods.append(method_name)
                speed_times.append(result['avg_recommendation_time'])
                
                # Assign colors based on method type
                color_assigned = False
                for method_key, color in method_color_map.items():
                    if method_key in name.lower():
                        speed_colors.append(color)
                        color_assigned = True
                        break
                if not color_assigned:
                    speed_colors.append(method_color_map['default'])
        
        if speed_methods:
            bars2 = ax2.bar(speed_methods, speed_times, 
                           color=speed_colors, edgecolor='black', linewidth=0.8, alpha=0.8)
            ax2.set_title('(b) Recommendation Time Comparison', 
                         fontweight='bold', fontsize=12, pad=15)
            ax2.set_ylabel('Recommendation Time (s)', fontsize=11)
            ax2.tick_params(axis='x', rotation=45, labelsize=10)
            ax2.tick_params(axis='y', labelsize=10)
            ax2.set_ylim(0, max(speed_times) * 1.15)
            
            for bar, val in zip(bars2, speed_times):
                ax2.text(bar.get_x() + bar.get_width()/2, bar.get_height() + max(speed_times) * 0.02,
                        f'{val:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')
        
        # 3. Accuracy vs Speed scatter plot
        ax3 = axes[1, 0]
        scatter_data = []
        
        for name in self.recommenders.keys():
            if (name in accuracy_results and 'avg_similarity' in accuracy_results[name] and
                name in speed_results and 'avg_recommendation_time' in speed_results[name]):
                
                x = speed_results[name]['avg_recommendation_time']
                y = accuracy_results[name]['avg_similarity']
                
                # Extract method name
                if 'SentenceTransformer_' in name:
                    method_name = name.replace('SentenceTransformer_', '')
                else:
                    method_name = name
                
                # Determine color based on method type
                color = method_color_map['default']
                for method_key, method_color in method_color_map.items():
                    if method_key in name.lower():
                        color = method_color
                        break
                
                scatter_data.append((x, y, color, method_name))
        
        # Plot scatter points
        for x, y, color, method_name in scatter_data:
            ax3.scatter(x, y, color=color, s=80, alpha=0.8, 
                       edgecolors='black', linewidth=0.8, label=method_name)
        
        ax3.set_title('(c) Accuracy vs Speed Trade-off', 
                     fontweight='bold', fontsize=12, pad=15)
        ax3.set_xlabel('Recommendation Time (s)', fontsize=11)
        ax3.set_ylabel('Average Similarity', fontsize=11)
        ax3.tick_params(axis='both', labelsize=10)
        
        # Add legend with unique methods only
        handles, labels = ax3.get_legend_handles_labels()
        by_label = dict(zip(labels, handles))
        ax3.legend(by_label.values(), by_label.keys(), 
                  bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=9)
        
        # Hide unused subplot
        axes[1, 1].axis('off')
        
        plt.tight_layout(pad=2.0)
        
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight', 
                       facecolor='white', edgecolor='none', format='png')
            logger.info(f"Comprehensive comparison chart saved to: {save_path}")
        
        plt.show()
    
    def run_full_comparison(self, questions: List[str], queries: List[str] = None, 
                          expected_answers: List[str] = None, save_plot: str = None, save_report: str = None) -> Dict[str, Any]:
        """
        Run the complete end-to-end comparison pipeline.
        
        Args:
            questions: candidate question list
            queries: query list (see :meth:`load_test_data` for None handling)
            expected_answers: expected answers aligned with ``queries``
            save_plot: optional path for the comparison figure
            save_report: optional CSV path for the performance report
            
        Returns:
            Dict with keys 'accuracy', 'speed', 'stability',
            'performance_report' (DataFrame) and 'summary'.
        """
        logger.info("开始完整的全局推荐器对比分析...")
        
        # Build recommenders and load data
        self.setup_all_recommenders()
        self.load_test_data(questions, queries, expected_answers)
        
        # Run each benchmark dimension
        accuracy_results = self.compare_accuracy()
        speed_results = self.compare_speed()
        stability_results = self.compare_stability()
        
        # Tabular performance report
        performance_report = self.generate_performance_report(
            accuracy_results, speed_results, stability_results
        )
        
        # Visualization (also shown interactively)
        self.visualize_comprehensive_comparison(
            accuracy_results, speed_results, stability_results, save_plot
        )
        
        # Persist the report; utf-8-sig keeps Chinese headers readable in Excel.
        if save_report:
            performance_report.to_csv(save_report, index=False, encoding='utf-8-sig')
            logger.info(f"性能报告已保存到: {save_report}")
        
        # High-level summary (best performers, per-category averages)
        summary = self._generate_comprehensive_summary(
            accuracy_results, speed_results, stability_results, performance_report
        )
        
        results = {
            'accuracy': accuracy_results,
            'speed': speed_results,
            'stability': stability_results,
            'performance_report': performance_report,
            'summary': summary
        }
        
        logger.info("全局推荐器对比分析完成")
        return results
    
    def _generate_comprehensive_summary(self, accuracy_results: Dict, speed_results: Dict, 
                                      stability_results: Dict, performance_report: pd.DataFrame) -> Dict[str, Any]:
        """
        Build the summary of the comparison results.
        
        Args:
            accuracy_results: output of :meth:`compare_accuracy`
            speed_results: output of :meth:`compare_speed` (unused here)
            stability_results: output of :meth:`compare_stability` (unused here)
            performance_report: output of :meth:`generate_performance_report`
            
        Returns:
            Dict with best-performer names/scores, per-category averages and
            recommender counts. NOTE(review): assumes a non-empty report —
            idxmax/idxmin raise on an empty DataFrame.
        """
        summary = {}
        
        # Best recommender per metric
        best_accuracy = performance_report.loc[performance_report['平均相似度'].idxmax(), '推荐器']
        best_accuracy_rate = performance_report.loc[performance_report['准确率'].idxmax(), '推荐器']
        fastest_method = performance_report.loc[performance_report['平均推荐时间(秒)'].idxmin(), '推荐器']
        most_stable = performance_report.loc[performance_report['稳定性分数'].idxmax(), '推荐器']
        
        # Per-category averages
        category_stats = performance_report.groupby('类别').agg({
            '平均相似度': 'mean',
            '准确率': 'mean',
            '平均推荐时间(秒)': 'mean',
            '稳定性分数': 'mean'
        }).round(3)
        
        # Convert the aggregated frame to plain nested dicts
        category_performance = {}
        for category in category_stats.index:
            category_performance[category] = {
                '平均相似度': category_stats.loc[category, '平均相似度'],
                '准确率': category_stats.loc[category, '准确率'],
                '平均推荐时间(秒)': category_stats.loc[category, '平均推荐时间(秒)'],
                '稳定性分数': category_stats.loc[category, '稳定性分数']
            }
        
        summary.update({
            'best_accuracy': best_accuracy,
            'best_accuracy_rate': best_accuracy_rate,
            'fastest_method': fastest_method,
            'most_stable': most_stable,
            'best_accuracy_score': f"{performance_report['平均相似度'].max():.3f}",
            'best_accuracy_rate_score': f"{performance_report['准确率'].max():.3f}",
            'fastest_time': f"{performance_report['平均推荐时间(秒)'].min():.3f}秒",
            'highest_stability': f"{performance_report['稳定性分数'].max():.3f}",
            'category_performance': category_performance,
            'total_recommenders': len(self.recommenders),
            'successful_recommenders': len([r for r in accuracy_results.values() if 'error' not in r])
        })
        
        return summary


def load_query_answer_pairs(filename: str = "sample_questions.json") -> Tuple[List[str], List[str]]:
    """Load query / expected-answer pairs from a JSON file in the examples dir.

    Args:
        filename: Name of the JSON file, resolved relative to this script's
            directory. Each entry must contain both ``query`` and
            ``expected_answer``; entries missing either key are skipped.

    Returns:
        Tuple[List[str], List[str]]: (queries, expected answers). On any
        read/parse failure the error is logged and two empty lists are
        returned, so callers never see an exception from here.
    """
    import json

    # Resolve the data file relative to this script, not the CWD.
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)

    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            records = json.load(f)

        # Keep only well-formed entries; extraction stays inside the try so
        # malformed structures fall through to the same best-effort handler.
        pairs = [
            (item['query'], item['expected_answer'])
            for item in records
            if 'query' in item and 'expected_answer' in item
        ]
        return [q for q, _ in pairs], [a for _, a in pairs]
    except Exception as e:
        logger.error(f"❌ 加载查询-答案对数据失败: {e}")
        return [], []

if __name__ == "__main__":
    # --- Load test data --------------------------------------------------
    try:
        # Candidate pool: the original question bank (questions.json).
        candidate_questions = load_questions()
        logger.info(f"✅ 成功加载 {len(candidate_questions)} 个候选问题")
        
        # Query / expected-answer pairs used for the accuracy evaluation.
        test_queries, expected_answers = load_query_answer_pairs()
        logger.info(f"✅ 成功加载 {len(test_queries)} 个测试查询")
        
    except Exception as e:
        logger.error(f"❌ 加载问题数据失败: {e}")
        # sys.exit instead of the `exit` builtin: `exit` is injected by the
        # `site` module and is not guaranteed to exist (e.g. `python -S`).
        sys.exit(1)
    
    # Ensure the report directory exists before run_full_comparison tries
    # to write the plot/CSV into it.
    os.makedirs("reports", exist_ok=True)
    
    # --- Run the full comparison across all recommenders -----------------
    global_comparator = GlobalRecommenderComparison()
    
    results = global_comparator.run_full_comparison(
        questions=candidate_questions,
        queries=test_queries,
        expected_answers=expected_answers,
        save_plot="reports/global_comparison_results.png",
        save_report="reports/global_performance_report.csv"
    )
    
    logger.info("\n=== 全局对比结果总结 ===")
    logger.info(f"准确性最好的推荐器: {results['summary']['best_accuracy']}")
    logger.info(f"准确率最高的推荐器: {results['summary']['best_accuracy_rate']}")
    logger.info(f"速度最快的推荐器: {results['summary']['fastest_method']}")
    logger.info(f"最稳定的推荐器: {results['summary']['most_stable']}")
    logger.info(f"\n成功测试的推荐器数量: {results['summary']['successful_recommenders']}/{results['summary']['total_recommenders']}")
    
    logger.info("\n=== 类别性能统计 ===")
    for category, stats in results['summary']['category_performance'].items():
        logger.info(f"{category}类别:")
        logger.info(f"  平均相似度: {stats['平均相似度']:.3f}")
        logger.info(f"  平均准确率: {stats['准确率']:.3f}")
        logger.info(f"  平均推荐时间: {stats['平均推荐时间(秒)']:.3f}秒")
        logger.info(f"  平均稳定性: {stats['稳定性分数']:.3f}")
    
    # --- Persist per-query retrieval results for every recommender -------
    logger.info("\n=== 保存各推荐器对所有测试问题的检索结果 ===")
    
    import json
    from datetime import datetime
    
    os.makedirs("reports/retrieval_results", exist_ok=True)
    
    for name, recommender in global_comparator.recommenders.items():
        logger.info(f"\n💾 保存 {name} 的检索结果...")
        method_results = []
        
        try:
            for i, query in enumerate(test_queries):
                query_result = {
                    "query_index": i,
                    "query": query,
                    "expected_answer": expected_answers[i] if i < len(expected_answers) else None,
                    "retrieved_questions": [],
                    "timestamp": datetime.now().isoformat()
                }
                
                # Dispatch on recommender type: BERT retrievers expose
                # find_similar_questions(), all others expose recommend().
                if 'BERT' in name:
                    recommender_results = recommender.find_similar_questions(user_query=query, top_k=5)
                    for j, result in enumerate(recommender_results):
                        query_result["retrieved_questions"].append({
                            "rank": j + 1,
                            "question": result['question'],
                            "similarity": float(result['similarity'])
                        })
                else:
                    recommender_results = recommender.recommend(query, top_k=5)
                    for j, result in enumerate(recommender_results):
                        # assumes result is ordered (question, score, similarity)
                        # — TODO confirm against the recommend() implementations
                        if len(result) >= 3:
                            question, score, similarity = result[:3]
                            query_result["retrieved_questions"].append({
                                "rank": j + 1,
                                "question": question,
                                "similarity": float(similarity),
                                "score": float(score)
                            })
                        else:
                            # Degenerate result: record whatever is available
                            # with zero similarity so ranks stay contiguous.
                            query_result["retrieved_questions"].append({
                                "rank": j + 1,
                                "question": result[0] if result else 'N/A',
                                "similarity": 0.0
                            })
                
                method_results.append(query_result)
            
            # Sanitize the recommender name so it forms a safe file name.
            output_file = f"reports/retrieval_results/{name.replace(' ', '_').replace('/', '_')}_results.json"
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump({
                    "method_name": name,
                    "total_queries": len(test_queries),
                    "results": method_results,
                    "generated_at": datetime.now().isoformat()
                }, f, ensure_ascii=False, indent=2)
            
            logger.info(f"  ✅ 已保存到: {output_file}")
            
        except Exception as e:
            # Best-effort per recommender: a failure in one must not stop
            # the remaining recommenders from being saved.
            logger.error(f"  ❌ {name} 保存失败: {e}")
    
    logger.info(f"\n📁 所有检索结果已保存到 reports/retrieval_results/ 目录")