# -*- coding: utf-8 -*-
"""
FAISS检索器对比分析工具

本模块提供对FAISS-based检索器的全面性能对比，包括：
- 查询速度对比
- 内存使用对比  
- 准确性对比
- 可扩展性分析

支持的检索器:
- FlatIndexRetriever: 精确搜索，100%召回率
- IVFIndexRetriever: 倒排文件索引，近似搜索
- HNSWIndexRetriever: 层次导航小世界图，高速搜索

作者: QA Retrieval Team
版本: 1.0.0
"""

import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import platform
import pandas as pd
from typing import List, Dict, Any, Tuple
from loguru import logger
import sys
import os
import tempfile

# Enable debug-level logging: replace loguru's default sink with a
# stderr sink using a detailed timestamp/location format
logger.remove()
logger.add(sys.stderr, level="DEBUG", format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {name}:{function}:{line} - {message}")

# Make the project root importable (this file lives one level below it)
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Configure a HuggingFace mirror endpoint and local cache directories —
# presumably so model downloads go through the mirror; set before the
# retriever imports below so the libraries pick them up (TODO confirm)
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.expanduser("~/.cache/huggingface")
os.environ["SENTENCE_TRANSFORMERS_HOME"] = os.path.expanduser("~/.cache/sentence_transformers")

from questionretrieval.retrievers.faiss import (
    FlatIndexRetriever,
    IVFIndexRetriever, 
    HNSWIndexRetriever
)
from questionretrieval.io.data_loader import load_questions

# 设置中文字体支持
def setup_chinese_font():
    """Configure matplotlib font families so Chinese labels render per OS."""
    # Preferred sans-serif fonts keyed by platform.system() value;
    # anything else (Linux etc.) falls back to the default list.
    font_table = {
        'Windows': ['SimHei', 'Microsoft YaHei'],
        'Darwin': ['Arial Unicode MS', 'PingFang SC'],
    }
    fallback_fonts = ['DejaVu Sans', 'WenQuanYi Micro Hei']
    try:
        plt.rcParams['font.sans-serif'] = font_table.get(platform.system(), fallback_fonts)
        # Render the minus sign as ASCII so it isn't dropped by CJK fonts.
        plt.rcParams['axes.unicode_minus'] = False
        logger.info("✅ 中文字体设置成功")
    except Exception as e:
        logger.warning(f"中文字体设置失败: {e}")

setup_chinese_font()


class FaissComparisonAnalyzer:
    """FAISS retriever comparison analyzer.

    Benchmarks the Flat / IVF / HNSW FAISS retrievers on index build
    time, query speed, memory usage and Top-1 accuracy, then derives a
    weighted comprehensive score, renders a PNG report and dumps the
    detailed retrieval results as JSON.
    """

    # Fallback query/answer pairs used when sample_questions.json is
    # missing, unreadable, or empty. Previously this list was duplicated
    # verbatim in two places inside setup_test_data().
    _DEFAULT_TEST_DATA = [
        {"query": "大伙房水库的总库容是多少？", "expected_answer": "22.68亿立方米"},
        {"query": "大伙房水库的设计标准是什么？", "expected_answer": "千年一遇洪水设计"},
        {"query": "大伙房水库位于哪个流域？", "expected_answer": "浑河流域"},
        {"query": "大伙房水库为哪些城市提供供水？", "expected_answer": "沈阳、抚顺、辽阳等"},
        {"query": "大伙房水库的防洪调度原则是什么？", "expected_answer": "安全第一，综合利用"}
    ]

    def __init__(self):
        """Initialize empty state; call setup_* methods before benchmarking."""
        self.retrievers: Dict[str, Any] = {}   # retriever name -> instance
        self.test_questions: List[str] = []    # questions used for scale tests
        self.test_queries: List[str] = []      # queries used for speed/accuracy tests
        self.results: Dict[str, Any] = {}      # accumulated benchmark output

    @staticmethod
    def _reports_dir() -> str:
        """Return the reports output directory (created if missing).

        The directory lives next to this script instead of the previously
        hard-coded absolute Windows path, so the tool works on any machine.
        """
        reports_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'reports')
        os.makedirs(reports_dir, exist_ok=True)
        return reports_dir

    def setup_test_data(self, data_size: str = "small"):
        """Load test data and derive the question/query sets.

        Args:
            data_size: "small" (<=15 questions), "medium" (50) or "large"
                (200). Unrecognized values fall back to "small".
        """
        logger.info(f"设置{data_size}规模测试数据...")

        # Load real data from sample_questions.json next to this script;
        # any failure falls back to the built-in default pairs.
        try:
            import json
            questions_path = os.path.join(os.path.dirname(__file__), 'sample_questions.json')

            with open(questions_path, 'r', encoding='utf-8') as f:
                self.test_data = json.load(f)

            # The 'query' field of every record forms the question pool.
            base_questions = [item['query'] for item in self.test_data]

            if not base_questions:
                logger.warning("⚠️  无法从sample_questions.json加载数据，使用默认测试数据")
                self.test_data = list(self._DEFAULT_TEST_DATA)
                base_questions = [item['query'] for item in self.test_data]

            logger.info(f"✓ 成功加载 {len(base_questions)} 个真实问题数据")

        except Exception as e:
            logger.warning(f"⚠️  加载sample_questions.json失败: {e}，使用默认测试数据")
            self.test_data = list(self._DEFAULT_TEST_DATA)
            base_questions = [item['query'] for item in self.test_data]

        # Scale the question pool by repeating the base questions when
        # there are too few of them for the requested size.
        if data_size == "medium":
            repeat_count = max(2, 50 // len(base_questions)) if len(base_questions) < 25 else 2
            self.test_questions = (base_questions * repeat_count)[:50]
        elif data_size == "large":
            repeat_count = max(4, 200 // len(base_questions)) if len(base_questions) < 50 else 4
            self.test_questions = (base_questions * repeat_count)[:200]
        else:
            # "small" and any unrecognized size: cap at 15 questions.
            # (Previously an unknown size silently left test_questions unset.)
            self.test_questions = base_questions[:min(15, len(base_questions))]

        # The first five base questions become the benchmark queries.
        self.test_queries = base_questions[:5]

        # Keep only queries that have a matching record (and therefore an
        # expected_answer) in test_data; preserves order and duplicates.
        known_queries = {item.get('query') for item in self.test_data}
        self.test_queries = [q for q in self.test_queries if q in known_queries]

        logger.info(f"✓ 测试数据准备完成: {len(self.test_questions)} 个问题, {len(self.test_queries)} 个查询")

    def setup_retrievers(self):
        """Instantiate and index all FAISS retrievers against the candidate pool."""
        logger.info("初始化FAISS检索器...")

        # Load the candidate question bank from questionretrieval/io/questions.json.
        try:
            import json
            questions_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'questionretrieval', 'io', 'questions.json')

            with open(questions_path, 'r', encoding='utf-8') as f:
                self.candidate_questions = json.load(f)

            logger.info(f"✓ 从questions.json加载候选问题库: {len(self.candidate_questions)} 个问题")

        except Exception as e:
            logger.warning(f"⚠️  无法加载questions.json: {e}，使用测试数据作为候选问题库")
            # Fallback: distinct expected answers from the test data.
            # Sorted so the candidate order (and thus the index) is
            # deterministic — list(set(...)) was not.
            unique_answers = {item['expected_answer'] for item in self.test_data if 'expected_answer' in item}
            self.candidate_questions = sorted(unique_answers)
            logger.info(f"✓ 使用备用候选问题库: {len(self.candidate_questions)} 个问题")

        retriever_configs = {
            'Flat': {
                'class': FlatIndexRetriever,
                'params': {'metric': 'cosine'}
            },
            'IVF': {
                'class': IVFIndexRetriever,
                'params': {
                    # Roughly one inverted-list cluster per ~10 candidates,
                    # with a floor of 10.
                    'nlist': max(10, len(self.candidate_questions) // 10),
                    'nprobe': 5,
                    'metric': 'cosine'
                }
            },
            'HNSW': {
                'class': HNSWIndexRetriever,
                'params': {
                    'M': 16,
                    'efConstruction': 200,
                    'efSearch': 50,
                    'metric': 'cosine'
                }
            },
        }

        for name, config in retriever_configs.items():
            try:
                retriever = config['class'](**config['params'])
                retriever.load_candidates(self.candidate_questions)
                self.retrievers[name] = retriever
                logger.info(f"✓ {name} 检索器初始化成功")
            except Exception as e:
                logger.error(f"✗ {name} 检索器初始化失败: {e}")

    def benchmark_build_time(self) -> Dict[str, float]:
        """Measure index (re)build time per retriever.

        Returns:
            Mapping retriever name -> build seconds, or -1 on failure.
        """
        logger.info("测试索引构建时间...")

        build_times = {}

        for name, retriever in self.retrievers.items():
            try:
                # Rebuild from the original candidate pool (not the test
                # questions) so the measurement reflects real indexing work.
                # perf_counter() is monotonic and higher-resolution than time().
                start_time = time.perf_counter()
                retriever.load_candidates(self.candidate_questions)
                build_time = time.perf_counter() - start_time
                build_times[name] = build_time
                logger.info(f"{name}: {build_time:.3f}s")
            except Exception as e:
                logger.error(f"{name} 构建时间测试失败: {e}")
                build_times[name] = -1

        return build_times

    def benchmark_query_speed(self, top_k: int = 5) -> Dict[str, Dict[str, float]]:
        """Measure batched query throughput per retriever.

        Args:
            top_k: number of results requested per query.

        Returns:
            Mapping name -> {'total_time', 'avg_time_per_query',
            'queries_per_second'}, or {'error': msg} on failure.
        """
        logger.info("测试查询速度...")

        if not self.test_queries:
            # Guard: an empty query set would raise IndexError on warm-up
            # and ZeroDivisionError below.
            logger.warning("没有可用的测试查询，跳过查询速度测试")
            return {}

        query_results = {}

        for name, retriever in self.retrievers.items():
            try:
                # Warm up (lazy model loading, caches) before timing.
                _ = retriever.recommend(self.test_queries[0], top_k=1)

                start_time = time.perf_counter()  # monotonic clock for timing
                for query in self.test_queries:
                    retriever.recommend(query, top_k=top_k)
                total_time = time.perf_counter() - start_time

                avg_time = total_time / len(self.test_queries)
                queries_per_second = len(self.test_queries) / total_time

                query_results[name] = {
                    'total_time': total_time,
                    'avg_time_per_query': avg_time,
                    'queries_per_second': queries_per_second
                }

                logger.info(f"{name}: {avg_time:.3f}s/查询, {queries_per_second:.2f} 查询/秒")

            except Exception as e:
                logger.error(f"{name} 查询速度测试失败: {e}")
                query_results[name] = {'error': str(e)}

        return query_results

    def benchmark_memory_usage(self) -> Dict[str, float]:
        """Report (or estimate) index memory usage per retriever.

        Returns:
            Mapping name -> megabytes, or -1 on failure.
        """
        logger.info("测试内存使用...")

        memory_usage = {}

        for name, retriever in self.retrievers.items():
            try:
                info = retriever.get_index_info()
                if 'memory_usage_mb' in info:
                    memory_mb = info['memory_usage_mb']
                else:
                    # Estimate: vectors * dimension * 4 bytes (float32).
                    total_vectors = info.get('total_vectors', 0)
                    # 384 looks like a sentence-transformer default dim — TODO confirm
                    dimension = info.get('dimension', 384)
                    memory_mb = (total_vectors * dimension * 4) / (1024 * 1024)

                memory_usage[name] = memory_mb
                logger.info(f"{name}: {memory_mb:.2f} MB")

            except Exception as e:
                logger.error(f"{name} 内存测试失败: {e}")
                memory_usage[name] = -1

        return memory_usage

    def test_accuracy_consistency(self, top_k: int = 5) -> Dict[str, float]:
        """Compute Top-1 accuracy per retriever on the loaded test data.

        Also dumps detailed retrieval results to JSON via
        _save_retrieval_results().

        Args:
            top_k: number of results requested from each retriever.

        Returns:
            Mapping retriever name -> hit@1 accuracy (0 on failure).
        """
        logger.info("使用测试数据集计算Top-1准确率...")

        # Project-local helper, imported lazily like the other data loads.
        from data_utils import calculate_retrieval_accuracy

        question_pairs = self.test_data

        if not question_pairs:
            logger.warning("没有找到测试数据，跳过准确性测试")
            return {}

        accuracy_scores = {}
        retrieval_results = {}  # retriever name -> metrics + detailed results

        for name, retriever in self.retrievers.items():
            try:
                metrics = calculate_retrieval_accuracy(retriever, question_pairs, top_k=top_k)
                accuracy_scores[name] = metrics['hit_at_1']  # Top-1 accuracy
                logger.info(f"{name}: Top-1准确率 {metrics['hit_at_1']:.3f} ({metrics['hit_at_1']*100:.1f}%)")

                # Collect detailed results. Fixed: the original sliced with
                # [:] (a full copy) although the comment said "first 10 only".
                detailed_results = []
                for pair in question_pairs[:10]:  # keep only the first 10 queries' details
                    query = pair['query']
                    expected = pair['expected_answer']

                    try:
                        if hasattr(retriever, 'recommend'):
                            results = retriever.recommend(query, top_k=top_k)
                        elif hasattr(retriever, 'find_similar_questions'):
                            results = retriever.find_similar_questions(query, top_k=top_k)
                        else:
                            continue

                        # Normalize the result items to {'question', 'similarity'}
                        # dicts. Fixed: the original indexed results[0] without
                        # checking for an empty hit list (IndexError).
                        if not results:
                            formatted_results = []
                        elif isinstance(results[0], tuple):
                            formatted_results = [{'question': r[0], 'similarity': r[1]} for r in results]
                        elif isinstance(results[0], dict):
                            formatted_results = results
                        else:
                            formatted_results = [{'question': str(r), 'similarity': 1.0} for r in results]

                        detailed_results.append({
                            'query': query,
                            'expected_answer': expected,
                            'retrieved_results': formatted_results
                        })
                    except Exception as e:
                        logger.warning(f"获取 {name} 检索器详细结果失败: {e}")

                retrieval_results[name] = {
                    'metrics': metrics,
                    'detailed_results': detailed_results
                }

            except Exception as e:
                logger.error(f"{name} 准确性测试失败: {e}")
                accuracy_scores[name] = 0
                retrieval_results[name] = {'error': str(e)}

        # Persist the collected results alongside the accuracy scores.
        self._save_retrieval_results(retrieval_results)

        return accuracy_scores

    def _save_retrieval_results(self, retrieval_results: Dict[str, Any]):
        """Persist per-retriever results and a summary as JSON under reports/."""
        try:
            import json
            # Fixed: was a hard-coded absolute Windows path.
            reports_dir = self._reports_dir()

            # Per-retriever detailed dumps live in a sub-directory.
            results_dir = os.path.join(reports_dir, 'retrieval_results')
            os.makedirs(results_dir, exist_ok=True)

            for retriever_name, results in retrieval_results.items():
                filename = f"{retriever_name}_faiss_results.json"
                filepath = os.path.join(results_dir, filename)

                with open(filepath, 'w', encoding='utf-8') as f:
                    json.dump(results, f, ensure_ascii=False, indent=2)

                logger.info(f"✅ {retriever_name} 检索结果已保存为 {filepath}")

            # Cross-retriever summary containing metrics only.
            summary_file = os.path.join(reports_dir, 'faiss_retrieval_summary.json')
            summary_data = {
                'timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),
                'retrievers': list(retrieval_results.keys()),
                'metrics_summary': {
                    name: results.get('metrics', {}) if 'error' not in results else {'error': results['error']}
                    for name, results in retrieval_results.items()
                }
            }

            with open(summary_file, 'w', encoding='utf-8') as f:
                json.dump(summary_data, f, ensure_ascii=False, indent=2)

            logger.info(f"✅ 检索结果汇总已保存为 {summary_file}")

        except Exception as e:
            logger.error(f"保存检索结果失败: {e}")

    def run_comprehensive_benchmark(self, data_size: str = "medium", top_k: int = 5) -> Dict[str, Any]:
        """Run the full benchmark suite and generate the reports.

        Args:
            data_size: test-data scale ("small" / "medium" / "large").
            top_k: number of results requested per query.

        Returns:
            Dict with keys 'build_times', 'query_speed', 'memory_usage',
            'accuracy'; empty dict if no retriever could be initialized.
        """
        logger.info("\n" + "="*60)
        logger.info("开始FAISS检索器全面基准测试")
        logger.info("="*60)

        # Prepare data and retrievers.
        self.setup_test_data(data_size)
        self.setup_retrievers()

        if not self.retrievers:
            logger.error("没有可用的检索器，测试终止")
            return {}

        # Run all individual benchmarks.
        results = {
            'build_times': self.benchmark_build_time(),
            'query_speed': self.benchmark_query_speed(top_k),
            'memory_usage': self.benchmark_memory_usage(),
            'accuracy': self.test_accuracy_consistency(top_k)
        }

        # Derive the combined score and render the figure.
        self.create_comprehensive_score(results)
        self.generate_visualization_report(results)

        logger.info("✅ 全面基准测试完成")
        return results

    def create_comprehensive_score(self, results: Dict[str, Any]):
        """Normalize raw metrics to 0-100 scores and combine them.

        Weighting: speed 40%, memory 30%, accuracy 30%. The result is
        stored under self.results['comprehensive_scores'].
        """
        logger.info("创建综合评分...")

        comprehensive_scores = {}

        # Speed score: relative to the fastest retriever (higher is better).
        if 'query_speed' in results:
            speed_data = {k: v.get('queries_per_second', 0) for k, v in results['query_speed'].items() if 'error' not in v}
            if speed_data:
                max_speed = max(speed_data.values())
                for name, speed in speed_data.items():
                    comprehensive_scores.setdefault(name, {})['speed_score'] = (speed / max_speed) * 100

        # Memory score: relative to the smallest footprint (lower is better).
        if 'memory_usage' in results:
            memory_data = {k: v for k, v in results['memory_usage'].items() if v > 0}
            if memory_data:
                min_memory = min(memory_data.values())
                for name, memory in memory_data.items():
                    comprehensive_scores.setdefault(name, {})['memory_score'] = (min_memory / memory) * 100

        # Accuracy score: relative to the most accurate retriever.
        if 'accuracy' in results:
            accuracy_data = results['accuracy']
            if accuracy_data:
                max_accuracy = max(accuracy_data.values())
                if max_accuracy > 0:  # avoid division by zero
                    for name, accuracy in accuracy_data.items():
                        comprehensive_scores.setdefault(name, {})['accuracy_score'] = (accuracy / max_accuracy) * 100
                else:
                    # All accuracies are 0: give every method a neutral score.
                    for name in accuracy_data.keys():
                        comprehensive_scores.setdefault(name, {})['accuracy_score'] = 50

        # Weighted total per retriever.
        for name, scores in comprehensive_scores.items():
            scores['total_score'] = (
                scores.get('speed_score', 0) * 0.4 +
                scores.get('memory_score', 0) * 0.3 +
                scores.get('accuracy_score', 0) * 0.3
            )

        self.results['comprehensive_scores'] = comprehensive_scores
        logger.info("✅ 综合评分创建完成")

    @staticmethod
    def _plot_bar_panel(ax, data, title, ylabel, value_fmt, colors):
        """Draw one labelled SCI-style bar chart panel onto *ax*.

        Args:
            ax: matplotlib Axes to draw on.
            data: mapping of retriever name -> numeric value.
            title: panel title (e.g. "(a) ...").
            ylabel: y-axis label.
            value_fmt: str.format pattern for the per-bar value labels.
            colors: color cycle to pull bar colors from.
        """
        bars = ax.bar(data.keys(), data.values(),
                      color=colors[:len(data)],
                      edgecolor='black', linewidth=0.8, alpha=0.8)
        ax.set_title(title, fontweight='bold', fontsize=12, pad=15)
        ax.set_ylabel(ylabel, fontsize=11)
        ax.tick_params(axis='x', rotation=45, labelsize=10)
        ax.tick_params(axis='y', labelsize=10)
        # Headroom above the tallest bar; fall back to 1.0 if all values are 0.
        max_value = max(data.values()) if max(data.values()) > 0 else 1.0
        ax.set_ylim(0, max_value * 1.15)
        # Numeric label on top of each bar.
        for bar in bars:
            height = bar.get_height()
            ax.text(bar.get_x() + bar.get_width()/2., height + max_value * 0.02,
                    value_fmt.format(height), ha='center', va='bottom',
                    fontsize=9, fontweight='bold')

    @staticmethod
    def _plot_no_accuracy_panel(ax):
        """Render the placeholder panel shown when no accuracy data exists."""
        ax.text(0.5, 0.5, 'No Accuracy Data Available\n(Check test data setup)',
                ha='center', va='center', transform=ax.transAxes,
                fontsize=12, bbox=dict(boxstyle='round', facecolor='lightgray', alpha=0.8))
        ax.set_title('(d) Top-1 Accuracy Comparison', fontweight='bold', fontsize=12, pad=15)
        ax.set_ylabel('Top-1 Accuracy', fontsize=11)

    def generate_visualization_report(self, results: Dict[str, Any]):
        """Render and save a 2x2 comparison figure (build time, speed, memory, accuracy)."""
        logger.info("生成可视化报告...")

        try:
            # SCI-paper plotting style.
            plt.rcParams.update({
                'font.family': 'Times New Roman',
                'font.size': 10,
                'axes.linewidth': 1.2,
                'axes.spines.top': False,
                'axes.spines.right': False,
                'axes.grid': True,
                'grid.alpha': 0.3,
                'grid.linewidth': 0.8,
                'legend.frameon': True,
                'legend.fancybox': True,
                'legend.shadow': True,
                'legend.framealpha': 0.9,
                'xtick.direction': 'in',
                'ytick.direction': 'in',
                'xtick.major.size': 4,
                'ytick.major.size': 4,
                'figure.dpi': 300
            })

            # SCI-paper standard color palette.
            sci_colors = ['#2E86AB', '#A23B72', '#F18F01', '#C73E1D', '#592E83', '#1B998B']

            fig, axes = plt.subplots(2, 2, figsize=(15, 12))
            fig.suptitle('FAISS Index Performance Comparison', fontsize=14, fontweight='bold', y=0.95)

            # (a) Build time — entries that errored are recorded as -1 and skipped.
            build_data = {k: v for k, v in results.get('build_times', {}).items() if v >= 0}
            if build_data:
                self._plot_bar_panel(axes[0, 0], build_data,
                                     '(a) Index Build Time Comparison',
                                     'Build Time (seconds)', '{:.3f}', sci_colors)

            # (b) Query speed.
            speed_data = {k: v.get('queries_per_second', 0)
                          for k, v in results.get('query_speed', {}).items() if 'error' not in v}
            if speed_data:
                self._plot_bar_panel(axes[0, 1], speed_data,
                                     '(b) Query Speed Comparison',
                                     'Queries per Second', '{:.1f}', sci_colors)

            # (c) Memory usage.
            memory_data = {k: v for k, v in results.get('memory_usage', {}).items() if v >= 0}
            if memory_data:
                self._plot_bar_panel(axes[1, 0], memory_data,
                                     '(c) Memory Usage Comparison',
                                     'Memory Usage (MB)', '{:.2f}', sci_colors)

            # (d) Accuracy — placeholder when empty. (The original carried an
            # unreachable duplicate of this placeholder branch.)
            accuracy_data = results.get('accuracy') or {}
            if accuracy_data:
                self._plot_bar_panel(axes[1, 1], accuracy_data,
                                     '(d) Top-1 Accuracy Comparison',
                                     'Top-1 Accuracy', '{:.3f}', sci_colors)
            else:
                self._plot_no_accuracy_panel(axes[1, 1])

            plt.tight_layout(pad=2.0)

            # Save next to this script. Fixed: the original used a
            # hard-coded absolute Windows path.
            try:
                save_path = os.path.join(self._reports_dir(), 'faiss_comparison_report.png')
                plt.savefig(save_path, dpi=300, bbox_inches='tight',
                            facecolor='white', edgecolor='none')
                logger.info(f"✅ 可视化报告已保存为 {save_path}")
            except Exception as e:
                logger.warning(f"图表保存失败: {e}")

            plt.show()

        except Exception as e:
            logger.error(f"可视化报告生成失败: {e}")

    def print_summary_report(self):
        """Log a ranked summary of the comprehensive scores plus usage advice."""
        # self.results is always set in __init__, so a plain truthiness
        # check is sufficient.
        if not self.results:
            logger.warning("没有可用的测试结果")
            return

        logger.info("\n" + "="*80)
        logger.info("FAISS检索器对比分析总结报告")
        logger.info("="*80)

        # Rank retrievers by total score, best first.
        if 'comprehensive_scores' in self.results:
            scores = self.results['comprehensive_scores']
            sorted_retrievers = sorted(scores.items(), key=lambda x: x[1].get('total_score', 0), reverse=True)

            logger.info("\n📊 综合性能排名:")
            for rank, (name, score_data) in enumerate(sorted_retrievers, 1):
                total_score = score_data.get('total_score', 0)
                logger.info(f"{rank}. {name}: {total_score:.1f}分")
                logger.info(f"   速度: {score_data.get('speed_score', 0):.1f}分")
                logger.info(f"   内存: {score_data.get('memory_score', 0):.1f}分")
                logger.info(f"   Top-1准确率: {score_data.get('accuracy_score', 0):.1f}分")

        # General usage recommendations.
        logger.info("\n💡 使用建议:")
        logger.info("• 小规模数据 (<1000): 推荐使用 Flat 或 HNSW")
        logger.info("• 中等规模数据 (1000-10000): 推荐使用 HNSW 或 IVF")
        logger.info("• 大规模数据 (>10000): 推荐使用 IVF")
        logger.info("• 内存受限场景: 推荐使用 IVF")
        logger.info("• Top-1准确率优先: 推荐使用 Flat 或 HNSW")
        logger.info("• 速度优先: 推荐使用 HNSW")

        logger.info("\n" + "="*80)


def main():
    """Entry point: run the full FAISS benchmark, then log the summary."""
    analyzer = FaissComparisonAnalyzer()

    # Medium-scale benchmark with top-5 retrieval per query.
    benchmark_results = analyzer.run_comprehensive_benchmark(data_size="medium", top_k=5)

    # Ranked scores and usage recommendations go to the log.
    analyzer.print_summary_report()

    return benchmark_results


if __name__ == "__main__":
    main()