# -*- coding: utf-8 -*-
"""
评估器模块
用于评估不同检索方法的性能
"""

import json
import os
import time
from typing import Dict, List, Any, Tuple, Optional
from loguru import logger
from collections import defaultdict
import statistics
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime

from .retrieval_methods import (
    VectorRetriever, KeywordRetriever, HybridRetriever,
    KnowledgeGraphRetriever, RerankRetriever, MultiStepRetriever,
    StructuredRetriever, RetrievalResult
)
from .data_manager import DataManager
from .ollama_client import ollama_client
from .config import RETRIEVAL_METHODS, EVALUATION_METRICS


class EvaluationResult:
    """Aggregated evaluation outcome for a single retrieval method.

    Per-question records are accumulated via :meth:`add_result`; summary
    statistics are computed once at the end via :meth:`calculate_metrics`.
    """

    def __init__(self, method_name: str):
        self.method_name = method_name
        # Summary metrics, populated by calculate_metrics()
        self.metrics = {}
        # One dict per evaluated question (see add_result)
        self.detailed_results = []
        # Retriever-side statistics (query counts etc.), filled in externally
        self.performance_stats = {}
        self.error_count = 0
        self.total_queries = 0

        # Retrieval-quality indicators consumed by get_overall_score(),
        # report generation and the charts. Populated by calculate_metrics().
        self.retrieval_precision = 0.0
        self.retrieval_recall = 0.0
        self.retrieval_f1 = 0.0
        self.relevance_score = 0.0
        self.coverage_score = 0.0

    def add_result(self, question: str, expected_answer: str,
                   retrieval_result: "RetrievalResult", evaluation_scores: Dict[str, float]):
        """Record the outcome of a single evaluated question.

        Args:
            question: The question that was asked.
            expected_answer: Ground-truth answer from the test set.
            retrieval_result: Result object returned by the retriever.
            evaluation_scores: Per-question quality scores
                (keys: 'precision', 'recall', 'f1', 'relevance', ...).

        An empty retrieved answer is counted as an error for the
        success-rate metric.
        """
        result = {
            'question': question,
            'expected_answer': expected_answer,
            'actual_answer': retrieval_result.answer,
            'confidence': retrieval_result.confidence,
            'response_time': retrieval_result.response_time,
            'retrieved_docs_count': len(retrieval_result.retrieved_docs),
            'evaluation_scores': evaluation_scores,
            'metadata': retrieval_result.metadata
        }
        self.detailed_results.append(result)
        self.total_queries += 1

        if not retrieval_result.answer:
            self.error_count += 1

    def _mean_score(self, key: str) -> float:
        """Mean of one evaluation-score field over all detailed results."""
        scores = [r['evaluation_scores'].get(key, 0) for r in self.detailed_results]
        return statistics.mean(scores) if scores else 0

    def calculate_metrics(self):
        """Aggregate the per-question scores into summary metrics.

        Also mirrors the quality means onto the ``retrieval_*`` attributes.
        Previously those attributes were never assigned, so the quality
        component of get_overall_score(), the report and every chart was
        always zero.
        """
        if not self.detailed_results:
            return

        # Quality metrics: mean over all evaluated questions
        self.metrics['precision'] = self._mean_score('precision')
        self.metrics['recall'] = self._mean_score('recall')
        self.metrics['f1'] = self._mean_score('f1')
        self.metrics['relevance'] = self._mean_score('relevance')

        # Response-time metrics; zero times are treated as "not measured"
        response_times = [r['response_time'] for r in self.detailed_results if r['response_time'] > 0]
        self.metrics['avg_response_time'] = statistics.mean(response_times) if response_times else 0
        self.metrics['median_response_time'] = statistics.median(response_times) if response_times else 0

        # Confidence
        confidences = [r['confidence'] for r in self.detailed_results]
        self.metrics['avg_confidence'] = statistics.mean(confidences) if confidences else 0

        # Success rate: queries that produced a non-empty answer
        self.metrics['success_rate'] = (self.total_queries - self.error_count) / self.total_queries if self.total_queries > 0 else 0

        # Average number of documents retrieved per query
        doc_counts = [r['retrieved_docs_count'] for r in self.detailed_results]
        self.metrics['avg_retrieved_docs'] = statistics.mean(doc_counts) if doc_counts else 0

        # Bug fix: expose the quality means on the attributes that the
        # overall score, report and visualizations actually read.
        self.retrieval_precision = self.metrics['precision']
        self.retrieval_recall = self.metrics['recall']
        self.retrieval_f1 = self.metrics['f1']
        self.relevance_score = self.metrics['relevance']

    def get_overall_score(self) -> float:
        """Combined score: 30% response speed, 70% retrieval quality."""
        # Speed component: full marks at <= 5 s average response time,
        # decaying hyperbolically above that (floor of 0.1 s avoids div-by-0).
        avg_response_time = self.metrics.get('avg_response_time', 0.1)
        performance_score = min(1.0, 5.0 / max(avg_response_time, 0.1))

        # Quality component: weighted mix of the retrieval indicators
        quality_score = (
            self.retrieval_precision * 0.3 +
            self.retrieval_recall * 0.3 +
            self.retrieval_f1 * 0.2 +
            self.relevance_score * 0.15 +
            self.coverage_score * 0.05
        )

        return performance_score * 0.3 + quality_score * 0.7

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the complete result to a JSON-friendly dict."""
        return {
            'method_name': self.method_name,
            'metrics': self.metrics,
            'performance_stats': self.performance_stats,
            'error_count': self.error_count,
            'total_queries': self.total_queries,
            'detailed_results': self.detailed_results,
            'retrieval_precision': self.retrieval_precision,
            'retrieval_recall': self.retrieval_recall,
            'retrieval_f1': self.retrieval_f1,
            'relevance_score': self.relevance_score,
            'coverage_score': self.coverage_score,
            'overall_score': self.get_overall_score()
        }


class Evaluator:
    """Benchmark harness for comparing retrieval methods.

    Typical flow: :meth:`initialize_retrievers` ->
    :meth:`evaluate_all_methods` -> :meth:`generate_report` /
    :meth:`save_detailed_results` / :meth:`create_visualizations`.
    """

    def __init__(self, config: Dict[str, Any] = None):
        """
        Initialize the evaluator.

        Args:
            config: Optional settings. Recognized keys: 'output_dir'
                (default 'results'), 'enable_detailed_analysis' (default
                True), 'enable_visualization' (default True).
        """
        self.config = config or {}

        # Evaluation configuration
        self.evaluation_metrics = EVALUATION_METRICS
        self.output_dir = self.config.get('output_dir', 'results')
        self.enable_detailed_analysis = self.config.get('enable_detailed_analysis', True)
        self.enable_visualization = self.config.get('enable_visualization', True)

        # Data manager
        self.data_manager = DataManager()

        # Retriever instances keyed by method name
        self.retrievers = {}

        # EvaluationResult objects keyed by method name
        self.evaluation_results = {}

        # Make sure the output directory exists
        os.makedirs(self.output_dir, exist_ok=True)

        logger.info("📊 评估器初始化完成")

    def initialize_retrievers(self, data: List[Dict[str, Any]]) -> bool:
        """
        Initialize every supported retriever with the given corpus.

        Args:
            data: Training/corpus records handed to each retriever.

        Returns:
            bool: True if at least one retriever initialized successfully.
        """
        try:
            logger.info("🔧 开始初始化检索器...")

            # Method name -> retriever class
            retriever_classes = {
                'vector': VectorRetriever,
                'keyword': KeywordRetriever,
                'hybrid': HybridRetriever,
                'knowledge_graph': KnowledgeGraphRetriever,
                'rerank': RerankRetriever,
                'multi_step': MultiStepRetriever,
                'structured': StructuredRetriever
            }

            for method_name, retriever_class in retriever_classes.items():
                try:
                    logger.info(f"🔧 初始化 {method_name} 检索器...")

                    # Per-method configuration (empty dict when absent)
                    method_config = RETRIEVAL_METHODS.get(method_name, {})

                    # Create the retriever instance
                    retriever = retriever_class(method_config)

                    # Only retrievers that initialize successfully join the pool
                    if retriever.initialize(data):
                        self.retrievers[method_name] = retriever
                        logger.info(f"✅ {method_name} 检索器初始化成功")
                    else:
                        logger.error(f"❌ {method_name} 检索器初始化失败")

                except Exception as e:
                    # One broken retriever must not abort the others
                    logger.error(f"❌ {method_name} 检索器初始化异常: {e}")
                    continue

            logger.info(f"✅ 检索器初始化完成，成功初始化 {len(self.retrievers)} 个检索器")
            return len(self.retrievers) > 0

        except Exception as e:
            logger.error(f"❌ 检索器初始化失败: {e}")
            return False

    def evaluate_all_methods(self, test_questions: List[Dict[str, Any]],
                           methods: List[str] = None) -> "Dict[str, EvaluationResult]":
        """
        Evaluate every requested retrieval method on the test set.

        Args:
            test_questions: Test question records ('question'/'answer' keys).
            methods: Method names to evaluate; None means all initialized
                retrievers.

        Returns:
            Dict[str, EvaluationResult]: Results keyed by method name.
        """
        if methods is None:
            methods = list(self.retrievers.keys())

        logger.info(f"🚀 开始评估 {len(methods)} 种检索方法，测试问题数: {len(test_questions)}")

        # Create an empty result holder for each method that will run
        for method in methods:
            if method in self.retrievers:
                self.evaluation_results[method] = EvaluationResult(method)

        # Evaluate the methods one by one
        for method in methods:
            if method not in self.retrievers:
                logger.warning(f"⚠️ 检索器 {method} 未初始化，跳过评估")
                continue

            logger.info(f"📊 评估 {method} 检索器...")
            self._evaluate_single_method(method, test_questions)

        # Aggregate per-question scores into summary metrics
        for method, result in self.evaluation_results.items():
            result.calculate_metrics()

        logger.info("✅ 所有方法评估完成")
        return self.evaluation_results

    def _evaluate_single_method(self, method_name: str, test_questions: List[Dict[str, Any]]) -> None:
        """
        Evaluate one retrieval method over all test questions.

        Args:
            method_name: Key into self.retrievers / self.evaluation_results.
            test_questions: Test question records.
        """
        retriever = self.retrievers[method_name]
        evaluation_result = self.evaluation_results[method_name]

        for i, question_data in enumerate(test_questions):
            # Bind these BEFORE the try-block so the except handler can
            # always reference them. Previously they were assigned inside
            # the try, so a failure while unpacking question_data raised
            # UnboundLocalError in the handler and masked the real error.
            question = question_data.get('question', '')
            expected_answer = question_data.get('answer', '')
            try:
                logger.info(f"📝 [{method_name}] 评估问题 {i+1}/{len(test_questions)}: {question[:50]}...")

                # Run the retrieval and time it
                start_time = time.time()
                retrieval_result = retriever.retrieve(question)
                end_time = time.time()

                # Record the wall-clock response time
                retrieval_result.response_time = end_time - start_time

                # Score retrieval quality
                evaluation_scores = self._evaluate_answer(
                    question, expected_answer, retrieval_result
                )

                # Append to the per-method result
                evaluation_result.add_result(
                    question, expected_answer, retrieval_result, evaluation_scores
                )

            except Exception as e:
                logger.error(f"❌ [{method_name}] 评估问题失败: {e}")
                # Record an empty (error) result so totals stay consistent
                empty_result = RetrievalResult(
                    query=question,
                    answer="",
                    retrieved_docs=[],
                    confidence=0.0,
                    response_time=0.0
                )
                evaluation_result.add_result(
                    question, expected_answer, empty_result, {'precision': 0, 'recall': 0, 'f1': 0, 'relevance': 0}
                )

        # Collect retriever-side performance statistics (best effort)
        try:
            evaluation_result.performance_stats = retriever.get_performance_stats()
        except Exception as e:
            logger.warning(f"⚠️ 获取性能统计失败: {e}")
            evaluation_result.performance_stats = {}

        logger.info(f"✅ [{method_name}] 评估完成")

    def _evaluate_answer(self, question: str, expected_answer: str,
                        retrieval_result: "RetrievalResult") -> Dict[str, float]:
        """
        Score retrieval quality (retrieval effectiveness, not answer
        generation quality).

        Args:
            question: The question asked.
            expected_answer: Ground-truth answer.
            retrieval_result: Result returned by the retriever.

        Returns:
            Dict[str, float]: Per-question scores ('precision', 'recall',
            'f1', 'relevance', plus 'retrieval_count'/'max_similarity').
        """
        # Documents returned by the retriever
        retrieved_docs = retrieval_result.retrieved_docs

        if not retrieved_docs:
            return {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'relevance': 0.0}

        # 1. Precision: mean similarity of each retrieved doc to the
        #    expected answer
        relevance_scores = []
        for doc in retrieved_docs:
            doc_content = doc.get('content', doc.get('answer', ''))
            similarity = self._calculate_semantic_similarity(expected_answer, doc_content)
            relevance_scores.append(similarity)

        precision = sum(relevance_scores) / len(relevance_scores) if relevance_scores else 0.0

        # 2. Recall: best single-document similarity acts as the recall proxy
        recall = max(relevance_scores) if relevance_scores else 0.0

        # 3. F1
        f1 = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

        # 4. Relevance: mean question-to-document similarity
        question_relevance_scores = []
        for doc in retrieved_docs:
            doc_content = doc.get('content', doc.get('answer', ''))
            relevance = self._calculate_semantic_similarity(question, doc_content)
            question_relevance_scores.append(relevance)

        relevance_score = sum(question_relevance_scores) / len(question_relevance_scores) if question_relevance_scores else 0.0

        return {
            'precision': precision,
            'recall': recall,
            'f1': f1,
            'relevance': relevance_score,
            'retrieval_count': len(retrieved_docs),
            'max_similarity': max(relevance_scores) if relevance_scores else 0.0
        }

    def _calculate_lexical_similarity(self, expected: str, actual: str) -> Dict[str, float]:
        """
        Lexical (whitespace-token overlap) similarity.

        Args:
            expected: Expected answer text.
            actual: Actual answer text.

        Returns:
            Dict[str, float]: {'precision', 'recall'} token-overlap scores.
        """
        if not expected or not actual:
            return {'precision': 0.0, 'recall': 0.0}

        # Tokenize on whitespace (NOTE: no CJK segmentation here)
        expected_words = set(expected.lower().split())
        actual_words = set(actual.lower().split())

        if not expected_words or not actual_words:
            return {'precision': 0.0, 'recall': 0.0}

        # Token overlap
        intersection = expected_words.intersection(actual_words)

        # Precision over actual tokens, recall over expected tokens
        precision = len(intersection) / len(actual_words)
        recall = len(intersection) / len(expected_words)

        return {'precision': precision, 'recall': recall}

    def _calculate_semantic_similarity(self, expected: str, actual: str) -> float:
        """
        Semantic similarity via the Ollama client, with a lexical fallback.

        Args:
            expected: Expected answer text.
            actual: Actual answer text.

        Returns:
            float: Similarity score.
        """
        try:
            # Preferred path: embedding-based similarity from Ollama
            similarity = ollama_client.calculate_similarity(expected, actual)
            return similarity
        except Exception as e:
            logger.warning(f"⚠️ 语义相似度计算失败: {e}")
            # Fallback: average of lexical precision and recall
            lexical_sim = self._calculate_lexical_similarity(expected, actual)
            return (lexical_sim['precision'] + lexical_sim['recall']) / 2

    def _calculate_coverage_score(self, retrieved_docs: List[Dict]) -> float:
        """
        Coverage (diversity) of the retrieved documents.

        Args:
            retrieved_docs: Retrieved document dicts.

        Returns:
            float: Coverage score in [0, 1]; higher = more diverse.
        """
        try:
            if len(retrieved_docs) <= 1:
                return 1.0 if retrieved_docs else 0.0

            # Pairwise similarity between all retrieved documents
            similarities = []
            contents = [doc.get('content', doc.get('answer', '')) for doc in retrieved_docs]

            for i in range(len(contents)):
                for j in range(i + 1, len(contents)):
                    sim = self._calculate_semantic_similarity(contents[i], contents[j])
                    similarities.append(sim)

            # Coverage = 1 - mean similarity (lower similarity = more diverse)
            avg_similarity = sum(similarities) / len(similarities) if similarities else 0.0
            coverage = 1.0 - avg_similarity

            return max(0.0, min(1.0, coverage))

        except Exception as e:
            logger.warning(f"⚠️ 覆盖度计算失败: {e}")
            return 0.5

    def _calculate_keyword_relevance(self, question: str, content: str) -> float:
        """
        Keyword overlap between a question and a content string.

        Args:
            question: The question text.
            content: Candidate content text.

        Returns:
            float: Fraction of question keywords found in the content.
        """
        if not question or not content:
            return 0.0

        # Whitespace tokenization
        question_keywords = set(question.lower().split())
        content_keywords = set(content.lower().split())

        # Drop Chinese stop words
        stop_words = {'的', '是', '在', '有', '和', '与', '或', '但', '如果', '因为', '所以'}
        question_keywords -= stop_words
        content_keywords -= stop_words

        if not question_keywords:
            return 0.0

        # Overlap ratio relative to the question's keywords
        intersection = question_keywords.intersection(content_keywords)
        relevance = len(intersection) / len(question_keywords)

        return relevance

    def generate_report(self, output_file: str = None) -> str:
        """
        Generate a markdown evaluation report and write it to disk.

        Args:
            output_file: Target path; defaults to a timestamped file in
                self.output_dir.

        Returns:
            str: The report content ('' when there are no results).
        """
        if not self.evaluation_results:
            logger.warning("⚠️ 没有评估结果，无法生成报告")
            return ""

        logger.info("📄 生成评估报告...")

        # Build the report line by line
        report_lines = []
        report_lines.append("# 检索方法效果对比报告")
        report_lines.append(f"\n生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report_lines.append(f"评估方法数量: {len(self.evaluation_results)}")

        # Overall comparison table
        report_lines.append("\n## 总体性能对比")
        report_lines.append("\n| 检索方式 | 精度 | 召回率 | F1分数 | 相关性 | 平均响应时间(s) | 成功率 | 综合评分 |")
        report_lines.append("|---------|------|--------|--------|--------|----------------|--------|----------|")

        for method_name, result in self.evaluation_results.items():
            metrics = result.metrics
            overall_score = result.get_overall_score()
            report_lines.append(
                f"| {method_name} | {metrics.get('precision', 0):.3f} | "
                f"{metrics.get('recall', 0):.3f} | {metrics.get('f1', 0):.3f} | "
                f"{metrics.get('relevance', 0):.3f} | {metrics.get('avg_response_time', 0):.3f} | "
                f"{metrics.get('success_rate', 0):.3f} | {overall_score:.3f} |"
            )

        # Per-method breakdown
        if self.enable_detailed_analysis:
            report_lines.append("\n## 详细分析")

            for method_name, result in self.evaluation_results.items():
                report_lines.append(f"\n### {method_name} 检索器")

                metrics = result.metrics
                stats = result.performance_stats

                report_lines.append(f"\n**检索质量指标:**")
                report_lines.append(f"- 检索精度: {result.retrieval_precision:.3f}")
                report_lines.append(f"- 检索召回率: {result.retrieval_recall:.3f}")
                report_lines.append(f"- F1分数: {result.retrieval_f1:.3f}")
                report_lines.append(f"- 相关性分数: {result.relevance_score:.3f}")
                report_lines.append(f"- 覆盖度分数: {result.coverage_score:.3f}")
                report_lines.append(f"- 平均置信度: {metrics.get('avg_confidence', 0):.3f}")
                report_lines.append(f"- 综合评分: {result.get_overall_score():.3f}")

                report_lines.append(f"\n**响应性能:**")
                report_lines.append(f"- 平均响应时间: {metrics.get('avg_response_time', 0):.3f}s")
                report_lines.append(f"- 中位响应时间: {metrics.get('median_response_time', 0):.3f}s")
                report_lines.append(f"- 成功率: {metrics.get('success_rate', 0):.3f}")
                report_lines.append(f"- 错误次数: {result.error_count}/{result.total_queries}")

                if stats:
                    report_lines.append(f"\n**检索统计:**")
                    report_lines.append(f"- 总查询次数: {stats.get('query_count', 0)}")
                    report_lines.append(f"- 平均检索文档数: {metrics.get('avg_retrieved_docs', 0):.1f}")

        # Conclusions and recommendations
        report_lines.append("\n## 结论和建议")
        report_lines.extend(self._generate_conclusions())

        report_content = "\n".join(report_lines)

        # Persist the report (best effort; content is returned regardless)
        if output_file is None:
            output_file = os.path.join(self.output_dir, f"evaluation_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md")

        try:
            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(report_content)
            logger.info(f"📄 评估报告已保存: {output_file}")
        except Exception as e:
            logger.error(f"❌ 保存报告失败: {e}")

        return report_content

    def _generate_conclusions(self) -> List[str]:
        """
        Build the conclusions/recommendations section of the report.

        Returns:
            List[str]: Report lines.
        """
        conclusions = []

        if not self.evaluation_results:
            return conclusions

        # Best method per criterion
        best_overall = max(self.evaluation_results.items(), key=lambda x: x[1].get_overall_score())
        best_precision = max(self.evaluation_results.items(), key=lambda x: x[1].retrieval_precision)
        best_recall = max(self.evaluation_results.items(), key=lambda x: x[1].retrieval_recall)
        best_f1 = max(self.evaluation_results.items(), key=lambda x: x[1].retrieval_f1)
        best_speed = min(self.evaluation_results.items(), key=lambda x: x[1].metrics.get('avg_response_time', float('inf')))

        conclusions.append(f"\n**最佳综合性能:** {best_overall[0]} (评分: {best_overall[1].get_overall_score():.3f})")
        conclusions.append(f"**最佳检索精度:** {best_precision[0]} (精度: {best_precision[1].retrieval_precision:.3f})")
        conclusions.append(f"**最佳召回率:** {best_recall[0]} (召回: {best_recall[1].retrieval_recall:.3f})")
        conclusions.append(f"**最佳F1分数:** {best_f1[0]} (F1: {best_f1[1].retrieval_f1:.3f})")
        conclusions.append(f"**最快响应:** {best_speed[0]} (时间: {best_speed[1].metrics.get('avg_response_time', 0):.3f}s)")

        # Usage-scenario recommendations
        conclusions.append("\n**适用场景建议:**")

        for method_name, result in self.evaluation_results.items():
            metrics = result.metrics
            precision = result.retrieval_precision
            speed = metrics.get('avg_response_time', 0)
            overall_score = result.get_overall_score()

            if overall_score > 0.8:
                conclusions.append(f"- {method_name}: 推荐用于高要求场景 (综合评分: {overall_score:.3f})")
            elif precision > 0.8 and speed < 1.0:
                conclusions.append(f"- {method_name}: 适合高精度快速场景")
            elif precision > 0.7:
                conclusions.append(f"- {method_name}: 适合高质量要求场景")
            elif speed < 0.5:
                conclusions.append(f"- {method_name}: 适合实时响应场景")
            else:
                conclusions.append(f"- {method_name}: 适合一般应用场景")

        return conclusions

    def save_detailed_results(self, output_file: str = None) -> str:
        """
        Save the detailed evaluation results as JSON.

        Args:
            output_file: Target path; defaults to a timestamped file in
                self.output_dir.

        Returns:
            str: Output file path ('' on failure).
        """
        if output_file is None:
            output_file = os.path.join(self.output_dir, f"detailed_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json")

        try:
            # Convert every EvaluationResult to a serializable dict
            results_data = {}
            for method_name, result in self.evaluation_results.items():
                results_data[method_name] = result.to_dict()

            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(results_data, f, ensure_ascii=False, indent=2)

            logger.info(f"💾 详细结果已保存: {output_file}")
            return output_file

        except Exception as e:
            logger.error(f"❌ 保存详细结果失败: {e}")
            return ""

    def create_visualizations(self) -> List[str]:
        """
        Create all comparison charts.

        Returns:
            List[str]: Paths of the generated chart files.
        """
        if not self.enable_visualization or not self.evaluation_results:
            return []

        logger.info("📊 生成可视化图表...")

        chart_files = []

        try:
            # Configure fonts so CJK labels render
            plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
            plt.rcParams['axes.unicode_minus'] = False

            # 1. Performance radar chart
            chart_files.append(self._create_radar_chart())

            # 2. Response-time bar chart
            chart_files.append(self._create_response_time_chart())

            # 3. Precision-recall scatter plot
            chart_files.append(self._create_precision_recall_chart())

            # 4. Overall performance heatmap
            chart_files.append(self._create_heatmap())

            logger.info(f"📊 生成 {len(chart_files)} 个可视化图表")

        except Exception as e:
            logger.error(f"❌ 生成可视化图表失败: {e}")

        # Drop entries for charts that failed (helpers return '')
        return [f for f in chart_files if f]

    def _create_radar_chart(self) -> str:
        """
        Create the per-method performance radar chart.

        Returns:
            str: Chart file path ('' on failure).
        """
        try:
            import numpy as np

            methods = list(self.evaluation_results.keys())
            # Metric names match EvaluationResult attribute names, so values
            # can be pulled with getattr instead of an if/elif chain.
            metrics = ['retrieval_precision', 'retrieval_recall', 'retrieval_f1', 'relevance_score', 'coverage_score']

            fig, ax = plt.subplots(figsize=(10, 8), subplot_kw=dict(projection='polar'))

            angles = np.linspace(0, 2 * np.pi, len(metrics), endpoint=False).tolist()
            angles += angles[:1]  # close the polygon

            for method in methods:
                result = self.evaluation_results[method]
                values = [getattr(result, metric, 0) for metric in metrics]
                values += values[:1]  # close the polygon

                ax.plot(angles, values, 'o-', linewidth=2, label=method)
                ax.fill(angles, values, alpha=0.25)

            ax.set_xticks(angles[:-1])
            ax.set_xticklabels(['检索精度', '检索召回', 'F1分数', '相关性', '覆盖度'])
            ax.set_ylim(0, 1)
            ax.set_title('检索方法性能对比雷达图', size=16, pad=20)
            ax.legend(loc='upper right', bbox_to_anchor=(1.3, 1.0))

            output_file = os.path.join(self.output_dir, 'performance_radar.png')
            plt.tight_layout()
            plt.savefig(output_file, dpi=300, bbox_inches='tight')
            plt.close()

            return output_file

        except Exception as e:
            logger.error(f"❌ 创建雷达图失败: {e}")
            return ""

    def _create_response_time_chart(self) -> str:
        """
        Create the average-response-time bar chart.

        Returns:
            str: Chart file path ('' on failure).
        """
        try:
            methods = list(self.evaluation_results.keys())
            response_times = [self.evaluation_results[method].metrics.get('avg_response_time', 0) for method in methods]

            fig, ax = plt.subplots(figsize=(12, 6))
            bars = ax.bar(methods, response_times, color='skyblue', alpha=0.7)

            # Value labels above each bar. Loop variable renamed from
            # 'time' to 'elapsed' so it no longer shadows the time module.
            for bar, elapsed in zip(bars, response_times):
                height = bar.get_height()
                ax.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                       f'{elapsed:.3f}s', ha='center', va='bottom')

            ax.set_xlabel('检索方法')
            ax.set_ylabel('平均响应时间 (秒)')
            ax.set_title('检索方法响应时间对比')
            plt.xticks(rotation=45)

            output_file = os.path.join(self.output_dir, 'response_time_comparison.png')
            plt.tight_layout()
            plt.savefig(output_file, dpi=300, bbox_inches='tight')
            plt.close()

            return output_file

        except Exception as e:
            logger.error(f"❌ 创建响应时间图失败: {e}")
            return ""

    def _create_precision_recall_chart(self) -> str:
        """
        Create the precision-recall scatter plot.

        Returns:
            str: Chart file path ('' on failure).
        """
        try:
            fig, ax = plt.subplots(figsize=(10, 8))

            for method, result in self.evaluation_results.items():
                precision = result.retrieval_precision
                recall = result.retrieval_recall

                ax.scatter(recall, precision, s=100, alpha=0.7, label=method)
                ax.annotate(method, (recall, precision), xytext=(5, 5),
                           textcoords='offset points', fontsize=10)

            ax.set_xlabel('召回率')
            ax.set_ylabel('精确度')
            ax.set_title('精确度-召回率分布图')
            ax.set_xlim(0, 1)
            ax.set_ylim(0, 1)
            ax.grid(True, alpha=0.3)
            ax.legend()

            output_file = os.path.join(self.output_dir, 'precision_recall_scatter.png')
            plt.tight_layout()
            plt.savefig(output_file, dpi=300, bbox_inches='tight')
            plt.close()

            return output_file

        except Exception as e:
            logger.error(f"❌ 创建精确度-召回率图失败: {e}")
            return ""

    def _create_heatmap(self) -> str:
        """
        Create the overall performance heatmap.

        Returns:
            str: Chart file path ('' on failure).
        """
        try:
            methods = list(self.evaluation_results.keys())
            # Metric names match EvaluationResult attribute names (see
            # _create_radar_chart), so getattr replaces the if/elif chain.
            metrics = ['retrieval_precision', 'retrieval_recall', 'retrieval_f1', 'relevance_score', 'coverage_score']

            # Rows = methods, columns = metrics
            data_matrix = [
                [getattr(self.evaluation_results[method], metric, 0) for metric in metrics]
                for method in methods
            ]

            # Draw the heatmap
            fig, ax = plt.subplots(figsize=(10, 8))
            im = ax.imshow(data_matrix, cmap='YlOrRd', aspect='auto', vmin=0, vmax=1)

            # Axis labels
            ax.set_xticks(range(len(metrics)))
            ax.set_yticks(range(len(methods)))
            ax.set_xticklabels(metrics)
            ax.set_yticklabels(methods)

            # Cell value labels
            for i in range(len(methods)):
                for j in range(len(metrics)):
                    text = ax.text(j, i, f'{data_matrix[i][j]:.3f}',
                                 ha="center", va="center", color="black", fontsize=10)

            ax.set_title('检索方法综合性能热力图')
            plt.colorbar(im, ax=ax)

            output_file = os.path.join(self.output_dir, 'performance_heatmap.png')
            plt.tight_layout()
            plt.savefig(output_file, dpi=300, bbox_inches='tight')
            plt.close()

            return output_file

        except Exception as e:
            logger.error(f"❌ 创建热力图失败: {e}")
            return ""


if __name__ == "__main__":
    # 测试评估器
    evaluator = Evaluator()
    
    # 测试数据
    test_data = [
        {
            "id": 1,
            "question": "什么是机器学习？",
            "answer": "机器学习是人工智能的一个分支，通过算法让计算机从数据中学习。",
            "category": 1,
            "difficulty": "medium",
            "type": "qa",
            "keywords": ["机器学习", "人工智能", "算法"]
        },
        {
            "id": 2,
            "question": "深度学习的原理是什么？",
            "answer": "深度学习使用多层神经网络来模拟人脑的学习过程。",
            "category": 1,
            "difficulty": "hard",
            "type": "qa",
            "keywords": ["深度学习", "神经网络", "学习"]
        }
    ]
    
    # 初始化检索器
    if evaluator.initialize_retrievers(test_data):
        # 评估所有方法
        results = evaluator.evaluate_all_methods(test_data)
        
        # 生成报告
        report = evaluator.generate_report()
        print(report)
        
        # 保存详细结果
        evaluator.save_detailed_results()
        
        # 创建可视化
        charts = evaluator.create_visualizations()
        print(f"生成图表: {charts}")