# -*- coding: utf-8 -*-
"""
聚类推荐器对比工具

本模块提供对不同聚类推荐器的性能对比功能，包括聚类质量、推荐准确性、速度等指标。

支持的推荐器:
- DBSCANClusteringRecommender: 基于DBSCAN的聚类推荐器
- HierarchicalClusteringRecommender: 基于层级聚类的推荐器

作者: QA Retrieval Team
版本: 1.0.0
"""

import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import platform
from typing import List, Dict, Any, Tuple
from loguru import logger
from sklearn.metrics import silhouette_score, adjusted_rand_score
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 导入配置模块（自动配置HuggingFace镜像）
try:
    from config import HuggingFaceConfig
    # 配置已在config模块导入时自动完成
except ImportError:
    # 如果无法导入config模块，使用备用配置
    os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
    os.environ['TRANSFORMERS_CACHE'] = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'models_cache')
    os.environ['SENTENCE_TRANSFORMERS_HOME'] = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'models_cache')

# 设置字体配置：英文使用Times New Roman，中文使用SimHei
# 使用混合字体策略：SimHei支持中文，Times New Roman支持英文
plt.rcParams['font.sans-serif'] = ['SimHei', 'Times New Roman', 'Arial', 'DejaVu Sans']
plt.rcParams['font.serif'] = ['SimHei', 'Times New Roman', 'DejaVu Serif', 'serif']
plt.rcParams['font.family'] = 'sans-serif'  # 使用sans-serif确保中文优先显示
plt.rcParams['axes.unicode_minus'] = False  # 解决负号显示问题

from questionretrieval.retrievers.clustering.dbscan_clustering_retriever import DBSCANClusteringRetriever
from questionretrieval.retrievers.clustering.hierarchical_clustering_retriever import HierarchicalClusteringRetriever
from questionretrieval.io.data_loader import load_questions, load_sample_questions, get_test_queries
from data_utils import load_sample_questions_from_json, get_question_pairs, calculate_retrieval_accuracy


class ClusteringMethodComparison:
    """
    Clustering retriever comparison tool.

    Compares multiple clustering-based retrievers on clustering quality,
    recommendation accuracy, and inference speed, and produces plots,
    JSON reports, and a textual summary.
    """

    def __init__(self):
        """
        Initialize the comparison tool.

        Attributes are populated later by :meth:`setup_retrievers` and
        :meth:`load_test_data`.
        """
        # name -> retriever instance
        self.retrievers = {}
        # candidate questions indexed by the retrievers
        self.test_questions = []
        # query questions used for evaluation
        self.test_queries = []

    def setup_retrievers(self):
        """
        Instantiate the clustering retrievers under comparison.

        Each retriever is constructed independently; a failure is logged and
        that retriever is skipped, so the comparison can still run with the
        remaining ones.
        """
        logger.info("初始化聚类检索器...")

        try:
            # DBSCAN clustering retriever with tuned parameters
            dbscan_retriever = DBSCANClusteringRetriever(
                eps=0.15,  # empirically more suitable eps value
                min_samples=2
            )
            self.retrievers['DBSCAN'] = dbscan_retriever
            logger.info("成功初始化 DBSCAN 聚类检索器")
        except Exception as e:
            logger.error(f"初始化 DBSCAN 聚类检索器失败: {e}")

        try:
            # Hierarchical clustering retriever using multi-level clustering
            hierarchical_retriever = HierarchicalClusteringRetriever(
                levels=[0.9, 0.7, 0.5, 0.3],  # multi-level thresholds
                level_weights=[0.3, 0.25, 0.2, 0.15],  # per-level weights
                linkage_method='average',
                outlier_detection=True,  # enable outlier detection
                model_names='paraphrase-multilingual-MiniLM-L12-v2'  # single model
            )
            self.retrievers['Hierarchical'] = hierarchical_retriever
            logger.info("成功初始化层级聚类检索器")
        except Exception as e:
            logger.error(f"初始化层级聚类检索器失败: {e}")

    def load_test_data(self, use_sample_json: bool = True, questions: List[str] = None, queries: List[str] = None):
        """
        Load test data and index it into every retriever.

        Args:
            use_sample_json: Whether to load data from sample_questions.json.
            questions: Candidate question list (used when use_sample_json=False).
            queries: Query question list (used when use_sample_json=False).
        """
        if use_sample_json:
            # Load data from sample_questions.json
            logger.info("从 sample_questions.json 加载测试数据...")
            candidate_questions, query_questions = load_sample_questions_from_json()
            self.test_questions = candidate_questions
            self.test_queries = query_questions
            # Keep the (query, expected) pairs for accuracy computation
            self.question_pairs = get_question_pairs()
        else:
            # Use the caller-provided data
            self.test_questions = questions or []
            if queries is None and questions:
                # Randomly pick some candidate questions to serve as queries
                np.random.seed(42)
                query_indices = np.random.choice(len(questions), min(10, len(questions)), replace=False)
                self.test_queries = [questions[i] for i in query_indices]
            else:
                self.test_queries = queries or []
            self.question_pairs = []

        # Index the candidate questions into every retriever
        for name, retriever in self.retrievers.items():
            try:
                retriever.load_candidates(self.test_questions)
                logger.info(f"为 {name} 加载了 {len(self.test_questions)} 个候选问题")
            except Exception as e:
                logger.error(f"为 {name} 加载候选问题失败: {e}")

    def compare_accuracy(self, top_k: int = 5) -> Dict:
        """
        Compare retrieval accuracy across clustering methods.

        Args:
            top_k: Cut-off for Top-K accuracy metrics.

        Returns:
            A dict mapping retriever name to its accuracy metrics.
        """
        logger.info(f"开始准确性比较 (Top-{top_k})...")

        accuracy_results = {}

        for name, retriever in self.retrievers.items():
            logger.info(f"测试 {name} 检索器...")

            if hasattr(self, 'question_pairs') and self.question_pairs:
                # Ground-truth pairs available: compute hit-rate/MRR style metrics
                try:
                    metrics = calculate_retrieval_accuracy(
                        retriever, self.question_pairs, top_k=top_k
                    )
                    accuracy_results[name] = metrics
                except Exception as e:
                    logger.error(f"{name} 计算准确性指标时出错: {e}")
                    accuracy_results[name] = {
                        'top_1_hit_rate': 0.0,
                        'top_k_hit_rate': 0.0,
                        'mrr': 0.0,
                        'avg_similarity': 0.0
                    }
            else:
                # No ground truth: fall back to similarity-based evaluation
                retriever_results = {}
                similarities = []

                for query in self.test_queries:
                    try:
                        results = retriever.find_similar_questions(query, top_k=top_k)
                        retriever_results[query] = results

                        # Collect the top result's similarity score
                        if results:
                            similarities.append(results[0]['similarity'])

                    except Exception as e:
                        logger.error(f"{name} 处理查询 '{query}' 时出错: {e}")
                        retriever_results[query] = []

                # Average similarity over successful queries
                avg_similarity = np.mean(similarities) if similarities else 0

                accuracy_results[name] = {
                    'results': retriever_results,
                    'avg_similarity': avg_similarity,
                    'total_queries': len(self.test_queries),
                    'successful_queries': len(similarities)
                }

            logger.info(f"{name} 准确性指标计算完成")

        logger.info("✅ 准确性比较完成")
        return accuracy_results

    def compare_clustering_quality(self) -> Dict[str, Any]:
        """
        Compare clustering quality across methods.

        Returns:
            A dict mapping retriever name to clustering-quality statistics
            (silhouette score, cluster count, noise statistics, sizes).
        """
        logger.info("开始聚类质量对比...")

        results = {}

        for name, retriever in self.retrievers.items():
            try:
                # Different retriever types expose cluster info differently
                if name == 'Hierarchical':
                    # The hierarchical retriever requires a level argument
                    cluster_info = retriever.get_cluster_info(level=0)

                    # Extract per-cluster sizes from the clusters dict
                    cluster_sizes = [info['size'] for info in cluster_info['clusters'].values()]
                    n_clusters = cluster_info['n_clusters']

                    # No direct labels/embeddings are exposed, so the
                    # silhouette score cannot be computed for this method
                    results[name] = {
                        'silhouette_score': -1,  # sentinel: not supported for hierarchical
                        'n_clusters': n_clusters,
                        'n_noise': 0,  # hierarchical clustering has no noise concept
                        'noise_ratio': 0,
                        'cluster_sizes': cluster_sizes
                    }

                    logger.info(f"{name} - 聚类数: {n_clusters}, 轮廓系数: 不适用")
                else:
                    # DBSCAN and other retrievers
                    cluster_info = retriever.get_cluster_info()

                    if 'labels' in cluster_info and 'embeddings' in cluster_info:
                        labels = cluster_info['labels']
                        embeddings = cluster_info['embeddings']

                        # Silhouette score requires at least 2 clusters
                        if len(set(labels)) > 1:
                            silhouette_avg = silhouette_score(embeddings, labels)
                        else:
                            silhouette_avg = -1

                        # Cluster statistics; label -1 denotes noise (DBSCAN)
                        unique_labels = set(labels)
                        n_clusters = len(unique_labels) - (1 if -1 in labels else 0)
                        n_noise = list(labels).count(-1) if -1 in labels else 0

                        results[name] = {
                            'silhouette_score': silhouette_avg,
                            'n_clusters': n_clusters,
                            'n_noise': n_noise,
                            'noise_ratio': n_noise / len(labels) if len(labels) > 0 else 0,
                            'cluster_sizes': [list(labels).count(i) for i in unique_labels if i != -1]
                        }

                        logger.info(f"{name} - 聚类数: {n_clusters}, 轮廓系数: {silhouette_avg:.3f}")
                    else:
                        results[name] = {'error': '无法获取聚类信息'}

            except Exception as e:
                logger.error(f"{name} 聚类质量分析失败: {e}")
                results[name] = {'error': str(e)}

        return results

    def compare_recommendation_accuracy(self, top_k: int = 5) -> Dict[str, Any]:
        """
        Compare recommendation accuracy across clustering retrievers.

        Args:
            top_k: Number of recommendations to request per query.

        Returns:
            A dict mapping retriever name to similarity statistics and
            recommendation coverage.
        """
        logger.info(f"开始推荐准确性对比 (top_k={top_k})...")

        results = {}

        for name, retriever in self.retrievers.items():
            try:
                similarities = []
                recommendation_counts = []

                for query in self.test_queries:
                    recommendations = retriever.recommend(
                        query_question=query,
                        top_k=top_k,
                        similarity_threshold=0.1  # low threshold to ensure results
                    )

                    if recommendations:
                        # Retrievers return tuples: (question, final_score, similarity)
                        # or a detailed format; similarity is always the last element
                        avg_similarity = np.mean([r[-1] for r in recommendations])
                        similarities.append(avg_similarity)
                        recommendation_counts.append(len(recommendations))

                if similarities:
                    results[name] = {
                        'avg_similarity': np.mean(similarities),
                        'std_similarity': np.std(similarities),
                        'min_similarity': np.min(similarities),
                        'max_similarity': np.max(similarities),
                        'avg_recommendations': np.mean(recommendation_counts),
                        'recommendation_coverage': len([s for s in similarities if s > 0]) / len(self.test_queries)
                    }
                else:
                    results[name] = {
                        'avg_similarity': 0,
                        'recommendation_coverage': 0,
                        'error': '无推荐结果'
                    }

                logger.info(f"{name} 平均相似度: {results[name].get('avg_similarity', 0):.3f}")

            except Exception as e:
                logger.error(f"{name} 推荐准确性测试失败: {e}")
                results[name] = {'error': str(e)}

        return results

    def compare_speed(self, num_queries: int = 10) -> Dict[str, Any]:
        """
        Compare inference speed across clustering retrievers.

        Args:
            num_queries: Number of test queries to time.

        Returns:
            A dict mapping retriever name to clustering and recommendation
            timing statistics (in seconds).
        """
        logger.info(f"开始速度对比 (查询数量={num_queries})...")

        test_queries = self.test_queries[:num_queries]
        results = {}

        for name, retriever in self.retrievers.items():
            try:
                # Re-measure clustering (indexing) time
                start_clustering = time.time()
                retriever.load_candidates(self.test_questions)
                end_clustering = time.time()
                clustering_time = end_clustering - start_clustering

                # Measure per-query recommendation time
                recommendation_times = []
                for query in test_queries:
                    start_time = time.time()
                    _ = retriever.recommend(
                        query_question=query, 
                        top_k=5,
                        similarity_threshold=0.1  # low threshold to ensure results
                    )
                    end_time = time.time()
                    recommendation_times.append(end_time - start_time)

                results[name] = {
                    'clustering_time': clustering_time,
                    'avg_recommendation_time': np.mean(recommendation_times),
                    'std_recommendation_time': np.std(recommendation_times),
                    'total_recommendation_time': np.sum(recommendation_times),
                    'total_time': clustering_time + np.sum(recommendation_times)
                }

                logger.info(f"{name} 聚类时间: {clustering_time:.3f}秒, 平均推荐时间: {np.mean(recommendation_times):.3f}秒")

            except Exception as e:
                logger.error(f"{name} 速度测试失败: {e}")
                results[name] = {'error': str(e)}

        return results

    def compare_cluster_distribution(self) -> Dict[str, Any]:
        """
        Compare cluster size distributions across methods.

        Returns:
            A dict mapping retriever name to distribution statistics
            (mean/std/min/max cluster sizes and max/min size ratio).
        """
        logger.info("开始聚类分布对比...")

        results = {}

        for name, retriever in self.retrievers.items():
            try:
                # Different retriever types expose cluster info differently
                if name == 'Hierarchical':
                    # The hierarchical retriever requires a level argument
                    cluster_info = retriever.get_cluster_info(level=0)

                    # Extract cluster sizes from the clusters dict
                    cluster_sizes = [info['size'] for info in cluster_info['clusters'].values()]

                    if cluster_sizes:
                        results[name] = {
                            'cluster_sizes': cluster_sizes,
                            'avg_cluster_size': np.mean(cluster_sizes),
                            'std_cluster_size': np.std(cluster_sizes),
                            'min_cluster_size': np.min(cluster_sizes),
                            'max_cluster_size': np.max(cluster_sizes),
                            'cluster_size_ratio': np.max(cluster_sizes) / np.min(cluster_sizes) if np.min(cluster_sizes) > 0 else float('inf'),
                            'n_clusters': cluster_info['n_clusters']
                        }
                    else:
                        results[name] = {'error': '无有效聚类'}
                else:
                    # DBSCAN and other retrievers
                    cluster_info = retriever.get_cluster_info()

                    if 'labels' in cluster_info:
                        labels = cluster_info['labels']
                        unique_labels = set(labels)

                        # Compute cluster-size distribution statistics
                        cluster_sizes = []
                        for label in unique_labels:
                            if label != -1:  # exclude noise points
                                size = list(labels).count(label)
                                cluster_sizes.append(size)

                        if cluster_sizes:
                            results[name] = {
                                'cluster_sizes': cluster_sizes,
                                'avg_cluster_size': np.mean(cluster_sizes),
                                'std_cluster_size': np.std(cluster_sizes),
                                'min_cluster_size': np.min(cluster_sizes),
                                'max_cluster_size': np.max(cluster_sizes),
                                'cluster_size_ratio': np.max(cluster_sizes) / np.min(cluster_sizes) if np.min(cluster_sizes) > 0 else float('inf')
                            }
                        else:
                            results[name] = {'error': '无有效聚类'}
                    else:
                        results[name] = {'error': '无法获取聚类标签'}

            except Exception as e:
                logger.error(f"{name} 聚类分布分析失败: {e}")
                results[name] = {'error': str(e)}

        return results

    def visualize_comparison(self, clustering_quality: Dict, accuracy_results: Dict, 
                           speed_results: Dict, distribution_results: Dict, save_path: str = None):
        """
        Visualize comparison results (SCI-paper style).

        Args:
            clustering_quality: Clustering-quality results.
            accuracy_results: Recommendation-accuracy results.
            speed_results: Speed results.
            distribution_results: Cluster-distribution results.
            save_path: Optional path to save the figure as PNG.
        """
        # SCI-paper style plot settings
        plt.rcParams.update({
            'font.size': 12,
            'axes.linewidth': 1.2,
            'axes.spines.top': False,
            'axes.spines.right': False,
            'axes.grid': True,
            'grid.alpha': 0.3,
            'grid.linewidth': 0.5,
            'legend.frameon': False,
            'legend.fontsize': 10,
            'xtick.direction': 'in',
            'ytick.direction': 'in',
            'xtick.major.size': 4,
            'ytick.major.size': 4,
            'figure.dpi': 300
        })

        # Standard SCI-paper color palette
        sci_colors = ['#2E86AB', '#A23B72', '#F18F01', '#C73E1D', '#592E83', '#1B998B']

        # Create the figure with a professional aspect ratio
        fig, axes = plt.subplots(1, 3, figsize=(15, 5))
        fig.suptitle('Clustering-based Retriever Performance Comparison', 
                    fontsize=14, fontweight='bold', y=0.95)

        # 1. Average similarity comparison
        ax1 = axes[0]
        methods = []
        avg_similarities = []

        for name, result in accuracy_results.items():
            if isinstance(result, dict) and 'avg_similarity' in result:
                methods.append(name)
                avg_similarities.append(result['avg_similarity'])

        if methods:
            bars1 = ax1.bar(methods, avg_similarities, 
                           color=sci_colors[:len(methods)], 
                           edgecolor='black', linewidth=0.8, alpha=0.8)
            ax1.set_title('(a) Average Similarity Comparison', 
                         fontweight='bold', fontsize=12, pad=15)
            ax1.set_ylabel('Average Similarity', fontsize=11)
            ax1.tick_params(axis='x', rotation=45, labelsize=10)
            ax1.tick_params(axis='y', labelsize=10)
            ax1.set_ylim(0, max(avg_similarities) * 1.15)

            for bar, val in zip(bars1, avg_similarities):
                ax1.text(bar.get_x() + bar.get_width()/2, bar.get_height() + max(avg_similarities) * 0.02,
                        f'{val:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')
        else:
            ax1.text(0.5, 0.5, 'No similarity data available', ha='center', va='center', 
                    transform=ax1.transAxes, fontsize=11, style='italic')
            ax1.set_title('(a) Average Similarity Comparison', 
                         fontweight='bold', fontsize=12, pad=15)
            ax1.set_ylabel('Average Similarity', fontsize=11)

        # 2. Accuracy comparison
        ax2 = axes[1]
        methods = []
        accuracies = []

        for name, result in accuracy_results.items():
            if isinstance(result, dict):
                # Prefer hit_at_1 (Top-1 hit rate) as the accuracy metric
                if 'hit_at_1' in result:
                    methods.append(name)
                    accuracies.append(result['hit_at_1'])
                elif 'top_1_hit_rate' in result:
                    methods.append(name)
                    accuracies.append(result['top_1_hit_rate'])
                elif 'accuracy' in result:
                    methods.append(name)
                    accuracies.append(result['accuracy'])

        if methods:
            bars2 = ax2.bar(methods, accuracies, 
                           color=sci_colors[:len(methods)], 
                           edgecolor='black', linewidth=0.8, alpha=0.8)
            ax2.set_title('(b) Top-1 Hit Rate Comparison', 
                         fontweight='bold', fontsize=12, pad=15)
            ax2.set_ylabel('Top-1 Hit Rate', fontsize=11)
            ax2.tick_params(axis='x', rotation=45, labelsize=10)
            ax2.tick_params(axis='y', labelsize=10)
            ax2.set_ylim(0, max(accuracies) * 1.15)

            for bar, val in zip(bars2, accuracies):
                ax2.text(bar.get_x() + bar.get_width()/2, bar.get_height() + max(accuracies) * 0.02,
                        f'{val:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')
        else:
            ax2.text(0.5, 0.5, 'No accuracy data available', ha='center', va='center', 
                    transform=ax2.transAxes, fontsize=11, style='italic')
            ax2.set_title('(b) Top-1 Hit Rate Comparison', 
                         fontweight='bold', fontsize=12, pad=15)
            ax2.set_ylabel('Top-1 Hit Rate', fontsize=11)

        # 3. Clustering time comparison
        ax3 = axes[2]
        methods = []
        clustering_times = []

        for name, result in speed_results.items():
            if 'clustering_time' in result:
                methods.append(name)
                clustering_times.append(result['clustering_time'])

        if methods:
            bars3 = ax3.bar(methods, clustering_times, 
                           color=sci_colors[:len(methods)], 
                           edgecolor='black', linewidth=0.8, alpha=0.8)
            ax3.set_title('(c) Clustering Time Comparison', 
                         fontweight='bold', fontsize=12, pad=15)
            ax3.set_ylabel('Clustering Time (s)', fontsize=11)
            ax3.tick_params(axis='x', rotation=45, labelsize=10)
            ax3.tick_params(axis='y', labelsize=10)
            ax3.set_ylim(0, max(clustering_times) * 1.15)

            for bar, val in zip(bars3, clustering_times):
                ax3.text(bar.get_x() + bar.get_width()/2, bar.get_height() + max(clustering_times) * 0.02,
                        f'{val:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')
        else:
            ax3.text(0.5, 0.5, 'No clustering time data available', ha='center', va='center', 
                    transform=ax3.transAxes, fontsize=11, style='italic')
            ax3.set_title('(c) Clustering Time Comparison', 
                         fontweight='bold', fontsize=12, pad=15)
            ax3.set_ylabel('Clustering Time (s)', fontsize=11)

        plt.tight_layout(pad=2.0)

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight', 
                       facecolor='white', edgecolor='none', format='png')
            logger.info(f"Comparison chart saved to: {save_path}")

        plt.show()

    def run_full_comparison(self, questions: List[str] = None, queries: List[str] = None, 
                          save_plot: str = None, use_sample_json: bool = True) -> Dict[str, Any]:
        """
        Run the full comparison analysis.

        Args:
            questions: Candidate question list (used when use_sample_json=False).
            queries: Query question list (used when use_sample_json=False).
            save_plot: Optional path to save the comparison chart.
            use_sample_json: Whether to load data from sample_questions.json.

        Returns:
            The complete comparison results, including quality, accuracy,
            speed, distribution, cluster info, and a summary.
        """
        logger.info("开始完整的聚类检索器对比分析...")

        # Set up retrievers and data
        self.setup_retrievers()
        self.load_test_data(use_sample_json, questions, queries)

        # Run the individual comparisons
        clustering_quality = self.compare_clustering_quality()
        accuracy_results = self.compare_accuracy()  # new accuracy computation
        recommendation_accuracy = self.compare_recommendation_accuracy()  # legacy recommendation accuracy
        speed_results = self.compare_speed()
        distribution_results = self.compare_cluster_distribution()

        # Generate the visualization
        self.visualize_comparison(clustering_quality, accuracy_results, 
                                speed_results, distribution_results, save_plot)

        # Persist cluster info under the reports directory
        reports_dir = "reports"
        if not os.path.exists(reports_dir):
            os.makedirs(reports_dir)
        cluster_info_path = os.path.join(reports_dir, "cluster_info.json")
        cluster_info = self.save_cluster_info(cluster_info_path)

        # Generate the summary
        summary = self._generate_summary(clustering_quality, recommendation_accuracy, 
                                       speed_results, distribution_results)

        results = {
            'clustering_quality': clustering_quality,
            'accuracy': accuracy_results,
            'recommendation_accuracy': recommendation_accuracy,
            'speed': speed_results,
            'distribution': distribution_results,
            'cluster_info': cluster_info,
            'summary': summary
        }

        logger.info("聚类推荐器对比分析完成")
        return results

    def save_cluster_info(self, save_path: str = "cluster_info.json") -> Dict[str, Any]:
        """
        Save detailed cluster information for both clustering methods.

        Args:
            save_path: Output file path for the JSON report.

        Returns:
            The cluster-info dictionary that was written.
        """
        import json
        from datetime import datetime

        cluster_info = {
            'timestamp': datetime.now().isoformat(),
            'total_questions': len(self.test_questions),
            'methods': {}
        }

        for name, retriever in self.retrievers.items():
            try:
                method_info = retriever.get_cluster_info()

                if name == 'Hierarchical':
                    # Hierarchical clustering: keep the full per-level info
                    cluster_info['methods'][name] = {
                        'type': '层级聚类',
                        **method_info,  # includes all level details
                        'note': '显示每个层级每个簇的所有问题'
                    }
                else:
                    # DBSCAN and other clustering methods
                    clusters_detail = {}
                    for label, info in method_info.items():
                        if isinstance(info, dict) and 'type' in info:
                            clusters_detail[str(label)] = {
                                'type': info['type'],
                                'size': info['size'],
                                'questions': info['questions'],
                                'avg_score': info.get('avg_score', 1.0)
                            }

                    cluster_info['methods'][name] = {
                        'type': 'DBSCAN聚类',
                        'clusters': clusters_detail,
                        'eps': getattr(retriever, 'eps', 0.15),
                        'min_samples': getattr(retriever, 'min_samples', 2),
                        'n_clusters': len([k for k, v in clusters_detail.items() if k != '-1']),
                        'n_noise': clusters_detail.get('-1', {}).get('size', 0),
                        'note': '显示每个簇的所有问题'
                    }

                logger.info(f"✅ 已保存 {name} 聚类信息")

            except Exception as e:
                logger.error(f"保存 {name} 聚类信息失败: {e}")
                cluster_info['methods'][name] = {'error': str(e)}

        # Write the report to disk
        try:
            with open(save_path, 'w', encoding='utf-8') as f:
                json.dump(cluster_info, f, ensure_ascii=False, indent=2)
            logger.info(f"✅ 聚类信息已保存到: {save_path}")
        except Exception as e:
            logger.error(f"保存聚类信息到文件失败: {e}")

        # Render a dendrogram for the hierarchical retriever, if supported
        for name, retriever in self.retrievers.items():
            if name == 'Hierarchical' and hasattr(retriever, 'plot_dendrogram'):
                try:
                    dendrogram_path = save_path.replace('.json', '_hierarchical_dendrogram.png')
                    retriever.plot_dendrogram(
                        figsize=(20, 12),
                        save_path=dendrogram_path,
                        # labels become unreadable beyond ~50 questions
                        show_labels=True if len(self.test_questions) <= 50 else False
                    )
                    logger.info(f"✅ 层次聚类树状图已保存到: {dendrogram_path}")
                except Exception as e:
                    logger.warning(f"⚠️ 生成层次聚类树状图失败: {e}")

        return cluster_info

    def _generate_summary(self, clustering_quality: Dict, accuracy_results: Dict, 
                         speed_results: Dict, distribution_results: Dict) -> Dict[str, str]:
        """
        Generate a summary of the comparison results.

        Args:
            clustering_quality: Clustering-quality results.
            accuracy_results: Accuracy results.
            speed_results: Speed results.
            distribution_results: Distribution results (currently unused here).

        Returns:
            Summary dict naming the best method per dimension.
        """
        summary = {}

        # Find the method with the best clustering quality
        best_clustering = None
        best_silhouette = -1
        for name, result in clustering_quality.items():
            if 'silhouette_score' in result and result['silhouette_score'] > best_silhouette:
                best_silhouette = result['silhouette_score']
                best_clustering = name
        
        # Find the method with the best recommendation accuracy
        best_accuracy = None
        best_accuracy_score = -1
        for name, result in accuracy_results.items():
            # Compatible with both result schemas; BUGFIX: the fallback key is
            # 'top_1_hit_rate' (as produced by compare_accuracy), not 'top1_hit_rate'
            score = result.get('avg_similarity', result.get('top_1_hit_rate', 0))
            if score > best_accuracy_score:
                best_accuracy_score = score
                best_accuracy = name

        # Find the fastest method
        fastest_method = None
        fastest_time = float('inf')
        for name, result in speed_results.items():
            if 'total_time' in result and result['total_time'] < fastest_time:
                fastest_time = result['total_time']
                fastest_method = name

        summary['best_clustering_quality'] = best_clustering
        summary['best_accuracy'] = best_accuracy
        summary['fastest_method'] = fastest_method
        summary['best_silhouette_score'] = f"{best_silhouette:.3f}"
        summary['best_accuracy_score'] = f"{best_accuracy_score:.3f}"
        summary['fastest_time'] = f"{fastest_time:.3f}秒"

        return summary


def main():
    """
    Entry point: run the clustering method comparison.

    Loads sample questions, runs the full comparison pipeline, prints the
    summary and cluster-info save status, and demonstrates retrieval on a
    fixed test query with both retrievers.
    """
    try:
        # Ensure the reports directory exists
        reports_dir = "reports"
        if not os.path.exists(reports_dir):
            os.makedirs(reports_dir)

        # Load question data from the unified data file
        try:
            sample_questions = load_sample_questions(71)  # load 71 sample questions
            print(f"✅ 成功加载 {len(sample_questions)} 个候选问题")
        except Exception as e:
            print(f"❌ 加载问题数据失败: {e}")
            # sys.exit instead of the builtin exit(): the latter is provided by
            # the site module and is not guaranteed to exist at runtime
            sys.exit(1)

        # Create the comparator
        comparator = ClusteringMethodComparison()

        # Run the full comparison
        results = comparator.run_full_comparison(
            questions=sample_questions,
            save_plot=os.path.join(reports_dir, "clustering_comparison_results.png")
        )

        print("\n=== 对比结果总结 ===")
        print(f"聚类质量最好的方法: {results['summary']['best_clustering_quality']}")
        print(f"检索准确性最好的方法: {results['summary']['best_accuracy']}")
        print(f"速度最快的方法: {results['summary']['fastest_method']}")

        # Show the cluster-info save status
        print("\n=== 聚类信息保存 ===")
        cluster_info = results.get('cluster_info', {})
        for method_name, method_data in cluster_info.get('methods', {}).items():
            if 'error' not in method_data:
                if method_name == 'Hierarchical':
                    print(f"{method_name}: {method_data.get('levels', 0)} levels, type: {method_data['type']}")
                    print(f"  Linkage method: {method_data.get('linkage_method', 'N/A')}")
                    print(f"  Outlier detection: {method_data.get('outlier_detection', False)}")
                    # Display cluster count for each level
                    level_data = method_data.get('level_data', {})
                    for level, data in level_data.items():
                        print(f"  Level {level}: {data.get('n_clusters', 0)} clusters (threshold: {data.get('threshold', 'N/A')})")
                else:
                    print(f"{method_name}: {method_data.get('n_clusters', 0)} clusters, type: {method_data['type']}")
                    if method_name == 'DBSCAN' and method_data.get('n_noise', 0) > 0:
                        print(f"  Noise points: {method_data['n_noise']}")
            else:
                print(f"{method_name}: Save failed - {method_data['error']}")
        print(f"Detailed cluster info saved to: {os.path.join(reports_dir, 'cluster_info.json')}")
        print(f"Hierarchical clustering dendrogram saved to: {os.path.join(reports_dir, 'cluster_info_hierarchical_dendrogram.png')}")

        # Demonstrate retrieval quality on a fixed test question
        print("\n=== 固定测试问题检索效果对比 ===")
        fixed_query = "大伙房水库的管理部门是什么？"
        print(f"测试问题: {fixed_query}")

        # DBSCAN retriever results (optimization: reuse the existing instance)
        print("\n🔍 DBSCAN聚类检索器检索结果:")
        try:
            # Reuse the already-created retriever instance
            dbscan_results = comparator.retrievers['DBSCAN'].recommend(
                fixed_query, 
                top_k=5, 
                return_detailed_scores=True,
                similarity_threshold=0.1
            )

            for i, result in enumerate(dbscan_results, 1):
                question, final_score, base_score, cluster_bonus, similarity = result
                print(f"  {i}. {question}")
                print(f"     相似度: {similarity:.3f}, 最终分数: {final_score:.3f} (基础: {base_score:.3f}, 簇加权: {cluster_bonus:.3f})")
        except Exception as e:
            print(f"  ❌ DBSCAN方法执行失败: {e}")

        # Hierarchical retriever results (optimization: reuse the existing instance)
        print("\n🔍 层级聚类检索器检索结果:")
        try:
            # Reuse the already-created retriever instance
            hierarchical_results = comparator.retrievers['Hierarchical'].recommend(
                fixed_query, 
                top_k=5, 
                return_detailed_scores=True,
                similarity_threshold=0.1
            )

            for i, result in enumerate(hierarchical_results, 1):
                question, final_score, base_score, cluster_bonus, similarity, level_details, outlier_info = result
                print(f"  {i}. {question}")
                print(f"     相似度: {similarity:.3f}, 最终分数: {final_score:.3f} (基础: {base_score:.3f}, 簇加权: {cluster_bonus:.3f})")
                print(f"     异常值: {outlier_info['is_outlier']}, 层级详情: {level_details}")
        except Exception as e:
            print(f"  ❌ 层级聚类方法执行失败: {e}")

        logger.info("\n✅ 聚类方法比较完成！")

    except Exception as e:
        logger.error(f"❌ 运行过程中出现错误: {str(e)}")
        raise


# Run the comparison only when executed as a script, not when imported.
if __name__ == "__main__":
    main()