"""
RAG精准度评估模块
支持检索指标和生成指标的评估
"""
import hashlib
import json
import logging
import os
import re
from dataclasses import asdict, dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

from langchain_core.documents import Document

from utils.rag_service import RAGService

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


@dataclass
class TestCase:
    """测试用例数据类"""
    question: str  # 问题
    expected_answer: Optional[str] = None  # 期望答案（可选）
    expected_sources: Optional[List[str]] = None  # 期望的来源文档（可选）
    ground_truth_docs: Optional[List[str]] = None  # 真实相关的文档ID列表（用于计算检索指标）


@dataclass
class RetrievalMetrics:
    """检索指标"""
    hit_at_k: Dict[int, float]  # Hit@K for different K values
    recall_at_k: Dict[int, float]  # Recall@K for different K values
    mrr: float  # Mean Reciprocal Rank


@dataclass
class GenerationMetrics:
    """生成指标"""
    source_support_score: float  # 引用来源支撑度得分 (0-1)
    hallucination_score: float  # 幻觉检测得分 (0-1, 1表示无幻觉)
    overall_quality_score: float  # 综合质量得分


@dataclass
class EvaluationResult:
    """评估结果"""
    test_case: TestCase
    retrieved_docs: List[Document]
    answer: str
    sources: List[Dict[str, str]]
    retrieval_metrics: Optional[RetrievalMetrics] = None
    generation_metrics: Optional[GenerationMetrics] = None
    retrieval_scores: Optional[List[float]] = None  # 检索相似度分数


class RAGEvaluator:
    """RAG评估器"""
    
    def __init__(
        self,
        rag_service: RAGService,
        annotation_file: Optional[str] = None
    ):
        """
        初始化评估器
        
        Args:
            rag_service: RAG服务实例
            annotation_file: 标注集文件路径（JSON格式）
        """
        self.rag_service = rag_service
        self.annotation_file = annotation_file
        self.test_cases: List[TestCase] = []
        
    def generate_travel_annotation_set(self, num_questions: int = 200) -> List[TestCase]:
        """
        生成旅游业务常见问题标注集
        
        Args:
            num_questions: 生成的问题数量（100-300之间）
        
        Returns:
            测试用例列表
        """
        if num_questions < 100:
            num_questions = 100
        elif num_questions > 300:
            num_questions = 300
            
        # 旅游业务常见问题模板
        question_templates = [
            # 景点相关
            ("XX景点门票多少钱", ["XX景点", "门票", "价格", "费用"]),
            ("XX景点开放时间", ["XX景点", "开放时间", "营业时间"]),
            ("XX景点怎么去", ["XX景点", "交通", "路线", "怎么去"]),
            ("XX景点有什么好玩的", ["XX景点", "景点介绍", "游玩项目"]),
            ("XX景点需要预约吗", ["XX景点", "预约", "预订"]),
            ("XX景点适合带小孩吗", ["XX景点", "亲子", "儿童", "适合"]),
            
            # 天气相关
            ("雨天能爬山吗", ["天气", "雨天", "爬山", "安全"]),
            ("XX地方现在天气怎么样", ["XX地方", "天气", "天气预报"]),
            ("XX季节适合去XX地方旅游吗", ["XX季节", "XX地方", "旅游", "适合"]),
            
            # 住宿相关
            ("XX地方有什么推荐的酒店", ["XX地方", "酒店", "住宿", "推荐"]),
            ("XX酒店价格是多少", ["XX酒店", "价格", "费用"]),
            ("XX酒店离XX景点远吗", ["XX酒店", "XX景点", "距离", "交通"]),
            
            # 美食相关
            ("XX地方有什么特色美食", ["XX地方", "美食", "特色", "小吃"]),
            ("XX地方哪里有好吃的", ["XX地方", "美食", "餐厅", "推荐"]),
            
            # 交通相关
            ("从XX到XX怎么走", ["XX", "XX", "交通", "路线"]),
            ("XX地方有地铁吗", ["XX地方", "地铁", "交通"]),
            ("XX地方停车方便吗", ["XX地方", "停车", "停车场"]),
            
            # 费用相关
            ("XX地方旅游大概需要多少钱", ["XX地方", "旅游", "费用", "预算"]),
            ("XX景点学生票有优惠吗", ["XX景点", "学生票", "优惠", "折扣"]),
            
            # 时间相关
            ("XX地方玩几天合适", ["XX地方", "游玩", "天数", "时间"]),
            ("XX景点什么时候人少", ["XX景点", "人少", "淡季", "时间"]),
            
            # 安全相关
            ("XX地方安全吗", ["XX地方", "安全", "治安"]),
            ("XX地方需要注意什么", ["XX地方", "注意事项", "安全"]),
            
            # 其他
            ("XX地方有什么特产", ["XX地方", "特产", "纪念品"]),
            ("XX地方有什么节日活动", ["XX地方", "节日", "活动", "庆典"]),
            ("XX地方适合拍照的地方", ["XX地方", "拍照", "景点", "推荐"]),
        ]
        
        # 常见景点和地名
        places = [
            "北京", "上海", "广州", "深圳", "杭州", "成都", "西安", "南京",
            "苏州", "厦门", "青岛", "大连", "三亚", "丽江", "大理", "桂林",
            "黄山", "泰山", "华山", "峨眉山", "九寨沟", "张家界", "西湖",
            "故宫", "天安门", "长城", "兵马俑", "外滩", "东方明珠"
        ]
        
        test_cases = []
        question_count = 0
        
        # 生成具体问题
        for template, keywords in question_templates:
            if question_count >= num_questions:
                break
                
            # 替换模板中的XX
            if "XX" in template:
                for place in places:
                    if question_count >= num_questions:
                        break
                    question = template.replace("XX", place)
                    test_cases.append(TestCase(
                        question=question,
                        expected_sources=None,  # 需要人工标注
                        ground_truth_docs=None  # 需要人工标注
                    ))
                    question_count += 1
            else:
                test_cases.append(TestCase(
                    question=template,
                    expected_sources=None,
                    ground_truth_docs=None
                ))
                question_count += 1
        
        # 如果还不够，添加一些通用问题
        generic_questions = [
            "旅游需要准备什么",
            "如何规划旅游路线",
            "旅游保险有必要买吗",
            "旅游时如何省钱",
            "旅游时如何避免被坑",
            "旅游时如何保护环境",
            "旅游时如何尊重当地文化",
            "旅游时如何保持安全",
            "旅游时如何保持健康",
            "旅游时如何拍照",
        ]
        
        for q in generic_questions:
            if question_count >= num_questions:
                break
            test_cases.append(TestCase(
                question=q,
                expected_sources=None,
                ground_truth_docs=None
            ))
            question_count += 1
        
        self.test_cases = test_cases[:num_questions]
        logger.info(f"生成了 {len(self.test_cases)} 个测试用例")
        return self.test_cases
    
    def save_annotation_set(self, file_path: str) -> bool:
        """
        保存标注集到文件
        
        Args:
            file_path: 保存路径
        
        Returns:
            是否成功
        """
        try:
            data = {
                "created_at": datetime.now().isoformat(),
                "test_cases": [asdict(tc) for tc in self.test_cases]
            }
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            logger.info(f"标注集已保存到: {file_path}")
            return True
        except Exception as e:
            logger.error(f"保存标注集失败: {e}")
            return False
    
    def load_annotation_set(self, file_path: Optional[str] = None) -> List[TestCase]:
        """
        从文件加载标注集
        
        Args:
            file_path: 文件路径，如果为None则使用self.annotation_file
        
        Returns:
            测试用例列表
        """
        file_path = file_path or self.annotation_file
        if not file_path or not os.path.exists(file_path):
            logger.warning(f"标注集文件不存在: {file_path}")
            return []
        
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            
            self.test_cases = [
                TestCase(**tc_data) for tc_data in data.get("test_cases", [])
            ]
            logger.info(f"成功加载 {len(self.test_cases)} 个测试用例")
            return self.test_cases
        except Exception as e:
            logger.error(f"加载标注集失败: {e}")
            return []
    
    def calculate_retrieval_metrics(
        self,
        retrieved_docs: List[Document],
        ground_truth_doc_ids: List[str],
        k_values: List[int] = [1, 3, 5, 10]
    ) -> RetrievalMetrics:
        """
        计算检索指标
        
        Args:
            retrieved_docs: 检索到的文档列表
            ground_truth_doc_ids: 真实相关的文档ID列表
            k_values: 要计算的K值列表
        
        Returns:
            检索指标
        """
        if not ground_truth_doc_ids:
            # 如果没有标注的真实文档，返回空指标
            return RetrievalMetrics(
                hit_at_k={k: 0.0 for k in k_values},
                recall_at_k={k: 0.0 for k in k_values},
                mrr=0.0
            )
        
        # 提取检索到的文档ID（从metadata中的source或使用内容哈希）
        retrieved_doc_ids = []
        for doc in retrieved_docs:
            # 使用source作为文档ID，如果没有则使用内容哈希
            doc_id = doc.metadata.get('source', '')
            if not doc_id:
                import hashlib
                doc_id = hashlib.md5(doc.page_content.encode('utf-8')).hexdigest()
            retrieved_doc_ids.append(doc_id)
        
        # 计算Hit@K
        hit_at_k = {}
        for k in k_values:
            top_k_ids = retrieved_doc_ids[:k]
            hits = sum(1 for doc_id in top_k_ids if doc_id in ground_truth_doc_ids)
            hit_at_k[k] = 1.0 if hits > 0 else 0.0
        
        # 计算Recall@K
        recall_at_k = {}
        total_relevant = len(ground_truth_doc_ids)
        for k in k_values:
            top_k_ids = retrieved_doc_ids[:k]
            relevant_retrieved = sum(1 for doc_id in top_k_ids if doc_id in ground_truth_doc_ids)
            recall_at_k[k] = relevant_retrieved / total_relevant if total_relevant > 0 else 0.0
        
        # 计算MRR (Mean Reciprocal Rank)
        mrr = 0.0
        for i, doc_id in enumerate(retrieved_doc_ids, 1):
            if doc_id in ground_truth_doc_ids:
                mrr = 1.0 / i
                break
        
        return RetrievalMetrics(
            hit_at_k=hit_at_k,
            recall_at_k=recall_at_k,
            mrr=mrr
        )
    
    def calculate_generation_metrics(
        self,
        answer: str,
        retrieved_docs: List[Document],
        sources: List[Dict[str, str]],
        expected_answer: Optional[str] = None
    ) -> GenerationMetrics:
        """
        计算生成指标
        
        Args:
            answer: 生成的答案
            retrieved_docs: 检索到的文档列表
            sources: 来源信息列表
            expected_answer: 期望答案（可选，用于更精确的评估）
        
        Returns:
            生成指标
        """
        # 1. 计算引用来源支撑度
        source_support_score = self._calculate_source_support(answer, retrieved_docs)
        
        # 2. 计算幻觉检测得分
        hallucination_score = self._detect_hallucination(answer, retrieved_docs)
        
        # 3. 综合质量得分
        overall_quality_score = (source_support_score * 0.6 + hallucination_score * 0.4)
        
        return GenerationMetrics(
            source_support_score=source_support_score,
            hallucination_score=hallucination_score,
            overall_quality_score=overall_quality_score
        )
    
    def _calculate_source_support(
        self,
        answer: str,
        retrieved_docs: List[Document]
    ) -> float:
        """
        计算引用来源支撑度
        
        方法：检查答案中的关键信息是否能在检索到的文档中找到支撑
        
        Returns:
            支撑度得分 (0-1)
        """
        if not answer or not retrieved_docs:
            return 0.0
        
        # 提取答案中的关键信息（名词、数字等）
        import re
        answer_keywords = set()
        
        # 提取中文词汇（2字以上）
        answer_keywords.update(re.findall(r'[\u4e00-\u9fa5]{2,}', answer))
        
        # 提取数字
        answer_keywords.update(re.findall(r'\d+', answer))
        
        # 提取英文单词
        answer_keywords.update(re.findall(r'[a-zA-Z]{3,}', answer))
        
        if not answer_keywords:
            return 0.5  # 如果没有关键词，给中等分数
        
        # 合并所有检索文档的内容
        all_context = " ".join([doc.page_content for doc in retrieved_docs])
        
        # 计算答案关键词在文档中的覆盖率
        supported_keywords = 0
        for keyword in answer_keywords:
            if keyword in all_context:
                supported_keywords += 1
        
        support_ratio = supported_keywords / len(answer_keywords) if answer_keywords else 0.0
        
        return min(support_ratio, 1.0)
    
    def _detect_hallucination(
        self,
        answer: str,
        retrieved_docs: List[Document]
    ) -> float:
        """
        检测幻觉
        
        方法：检查答案中是否包含文档中没有的信息
        
        Returns:
            幻觉检测得分 (0-1, 1表示无幻觉)
        """
        if not answer or not retrieved_docs:
            return 0.0
        
        # 合并所有检索文档的内容
        all_context = " ".join([doc.page_content for doc in retrieved_docs])
        
        # 提取答案中的关键事实（数字、专有名词等）
        import re
        
        # 提取数字事实
        answer_numbers = set(re.findall(r'\d+\.?\d*', answer))
        context_numbers = set(re.findall(r'\d+\.?\d*', all_context))
        
        # 检查答案中的数字是否都在上下文中
        unsupported_numbers = answer_numbers - context_numbers
        
        # 提取专有名词（大写字母开头的词、中文地名等）
        answer_entities = set()
        answer_entities.update(re.findall(r'[A-Z][a-z]+', answer))  # 英文专有名词
        answer_entities.update(re.findall(r'[\u4e00-\u9fa5]{2,}', answer))  # 中文词汇
        
        context_entities = set()
        context_entities.update(re.findall(r'[A-Z][a-z]+', all_context))
        context_entities.update(re.findall(r'[\u4e00-\u9fa5]{2,}', all_context))
        
        # 检查是否有明显不在上下文中的实体
        unsupported_entities = answer_entities - context_entities
        
        # 计算幻觉得分（未支持的实体越少，得分越高）
        total_facts = len(answer_numbers) + len(answer_entities)
        unsupported_facts = len(unsupported_numbers) + len(unsupported_entities)
        
        if total_facts == 0:
            return 0.8  # 如果没有可检测的事实，给较高分数
        
        hallucination_ratio = unsupported_facts / total_facts
        hallucination_score = 1.0 - min(hallucination_ratio, 1.0)
        
        return max(hallucination_score, 0.0)
    
    def evaluate_single_case(
        self,
        test_case: TestCase,
        use_hybrid: bool = True,
        k: int = 5
    ) -> EvaluationResult:
        """
        评估单个测试用例
        
        Args:
            test_case: 测试用例
            use_hybrid: 是否使用混合检索
            k: 检索返回的文档数量
        
        Returns:
            评估结果
        """
        # 执行检索
        if use_hybrid:
            retrieved_docs = self.rag_service.hybrid_search(
                test_case.question, k=k, apply_filter=True
            )
        else:
            retrieved_docs = self.rag_service.semantic_search(
                test_case.question, k=k, apply_filter=True
            )
        
        # 获取检索相似度分数
        retrieval_scores = None
        try:
            results_with_scores = self.rag_service.vector_store.similarity_search_with_score(
                test_case.question, k=k * 2
            )
            retrieval_scores = [score for _, score in results_with_scores[:k]]
        except:
            pass
        
        # 执行查询获取答案
        result = self.rag_service.query(
            test_case.question,
            use_hybrid=use_hybrid,
            return_sources=True
        )
        
        answer = result.get("answer", "")
        sources = result.get("sources", [])
        
        # 计算检索指标
        retrieval_metrics = None
        if test_case.ground_truth_docs:
            retrieval_metrics = self.calculate_retrieval_metrics(
                retrieved_docs,
                test_case.ground_truth_docs
            )
        
        # 计算生成指标
        generation_metrics = self.calculate_generation_metrics(
            answer,
            retrieved_docs,
            sources,
            test_case.expected_answer
        )
        
        return EvaluationResult(
            test_case=test_case,
            retrieved_docs=retrieved_docs,
            answer=answer,
            sources=sources,
            retrieval_metrics=retrieval_metrics,
            generation_metrics=generation_metrics,
            retrieval_scores=retrieval_scores
        )
    
    def evaluate_all(
        self,
        test_cases: Optional[List[TestCase]] = None,
        use_hybrid: bool = True,
        k: int = 5
    ) -> List[EvaluationResult]:
        """
        评估所有测试用例
        
        Args:
            test_cases: 测试用例列表，如果为None则使用self.test_cases
            use_hybrid: 是否使用混合检索
            k: 检索返回的文档数量
        
        Returns:
            评估结果列表
        """
        test_cases = test_cases or self.test_cases
        if not test_cases:
            logger.warning("没有测试用例可评估")
            return []
        
        results = []
        total = len(test_cases)
        
        logger.info(f"开始评估 {total} 个测试用例...")
        
        for i, test_case in enumerate(test_cases, 1):
            try:
                logger.info(f"评估进度: {i}/{total} - {test_case.question[:50]}...")
                result = self.evaluate_single_case(test_case, use_hybrid=use_hybrid, k=k)
                results.append(result)
            except Exception as e:
                logger.error(f"评估测试用例失败: {test_case.question} - {e}")
        
        logger.info(f"评估完成，共评估 {len(results)} 个测试用例")
        return results
    
    def generate_report(
        self,
        results: List[EvaluationResult],
        output_file: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        生成评估报告
        
        Args:
            results: 评估结果列表
            output_file: 输出文件路径（可选）
        
        Returns:
            报告字典
        """
        if not results:
            logger.warning("没有评估结果可生成报告")
            return {}
        
        # 计算平均检索指标
        retrieval_metrics_list = [r.retrieval_metrics for r in results if r.retrieval_metrics]
        avg_retrieval_metrics = None
        
        if retrieval_metrics_list:
            k_values = list(retrieval_metrics_list[0].hit_at_k.keys())
            avg_hit_at_k = {
                k: sum(m.hit_at_k[k] for m in retrieval_metrics_list) / len(retrieval_metrics_list)
                for k in k_values
            }
            avg_recall_at_k = {
                k: sum(m.recall_at_k[k] for m in retrieval_metrics_list) / len(retrieval_metrics_list)
                for k in k_values
            }
            avg_mrr = sum(m.mrr for m in retrieval_metrics_list) / len(retrieval_metrics_list)
            
            avg_retrieval_metrics = {
                "hit_at_k": avg_hit_at_k,
                "recall_at_k": avg_recall_at_k,
                "mrr": avg_mrr
            }
        
        # 计算平均生成指标
        generation_metrics_list = [r.generation_metrics for r in results if r.generation_metrics]
        avg_generation_metrics = None
        
        if generation_metrics_list:
            avg_source_support = sum(m.source_support_score for m in generation_metrics_list) / len(generation_metrics_list)
            avg_hallucination = sum(m.hallucination_score for m in generation_metrics_list) / len(generation_metrics_list)
            avg_quality = sum(m.overall_quality_score for m in generation_metrics_list) / len(generation_metrics_list)
            
            avg_generation_metrics = {
                "source_support_score": avg_source_support,
                "hallucination_score": avg_hallucination,
                "overall_quality_score": avg_quality
            }
        
        # 构建报告
        report = {
            "evaluation_time": datetime.now().isoformat(),
            "total_cases": len(results),
            "retrieval_metrics": avg_retrieval_metrics,
            "generation_metrics": avg_generation_metrics,
            "detailed_results": [
                {
                    "question": r.test_case.question,
                    "answer": r.answer[:200] + "..." if len(r.answer) > 200 else r.answer,
                    "retrieval_metrics": asdict(r.retrieval_metrics) if r.retrieval_metrics else None,
                    "generation_metrics": asdict(r.generation_metrics) if r.generation_metrics else None,
                    "num_sources": len(r.sources)
                }
                for r in results
            ]
        }
        
        # 保存报告
        if output_file:
            try:
                with open(output_file, 'w', encoding='utf-8') as f:
                    json.dump(report, f, ensure_ascii=False, indent=2)
                logger.info(f"评估报告已保存到: {output_file}")
            except Exception as e:
                logger.error(f"保存报告失败: {e}")
        
        # 打印报告摘要
        self._print_report_summary(report)
        
        return report
    
    def _print_report_summary(self, report: Dict[str, Any]) -> None:
        """打印报告摘要"""
        print("\n" + "=" * 80)
        print("RAG评估报告摘要")
        print("=" * 80)
        print(f"评估时间: {report['evaluation_time']}")
        print(f"测试用例总数: {report['total_cases']}")
        
        if report.get('retrieval_metrics'):
            print("\n检索指标:")
            rm = report['retrieval_metrics']
            print(f"  平均MRR: {rm['mrr']:.4f}")
            print("  Hit@K:")
            for k, score in rm['hit_at_k'].items():
                print(f"    Hit@{k}: {score:.4f}")
            print("  Recall@K:")
            for k, score in rm['recall_at_k'].items():
                print(f"    Recall@{k}: {score:.4f}")
        
        if report.get('generation_metrics'):
            print("\n生成指标:")
            gm = report['generation_metrics']
            print(f"  引用来源支撑度: {gm['source_support_score']:.4f}")
            print(f"  幻觉检测得分: {gm['hallucination_score']:.4f}")
            print(f"  综合质量得分: {gm['overall_quality_score']:.4f}")
        
        print("=" * 80 + "\n")











