"""
评估指标模块
计算检索准确率和摘要质量
"""
import logging
from typing import List, Dict
from rouge_score import rouge_scorer
from sklearn.metrics import precision_at_k

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class MetricsEvaluator:
    """Compute retrieval-accuracy and summary-quality metrics.

    Retrieval metrics (Precision/Recall/F1 at rank k) operate on lists of
    paper IDs; summary quality is scored with ROUGE-L F1.
    """

    def __init__(self):
        # One scorer instance is reused for every calculate_rouge_l call;
        # stemming normalizes word forms before overlap matching.
        self.rouge_scorer = rouge_scorer.RougeScorer(
            ['rouge1', 'rouge2', 'rougeL'],
            use_stemmer=True
        )

    @staticmethod
    def _hit_count(relevant_set: set, retrieved: List[str]) -> int:
        """Count entries of *retrieved* that appear in *relevant_set*.

        Duplicates in *retrieved* are counted once per occurrence, matching
        the metric definitions below.
        """
        hits = 0
        for paper_id in retrieved:
            if paper_id in relevant_set:
                hits += 1
        return hits

    def calculate_precision_at_k(
        self,
        relevant_papers: List[str],
        retrieved_papers: List[str],
        k: int = 10
    ) -> float:
        """Return Precision@K.

        Args:
            relevant_papers: IDs of the ground-truth relevant papers.
            retrieved_papers: IDs of retrieved papers, best first.
            k: Cutoff rank.

        Returns:
            Fraction of the top-min(k, len(retrieved)) results that are
            relevant, or 0.0 when nothing was retrieved.
        """
        top_k = retrieved_papers[:k]
        if not top_k:
            return 0.0
        return self._hit_count(set(relevant_papers), top_k) / len(top_k)

    def calculate_rouge_l(
        self,
        reference: str,
        summary: str
    ) -> float:
        """Return the ROUGE-L F1 score of *summary* against *reference*.

        Args:
            reference: Gold-standard summary text.
            summary: Generated summary text.

        Returns:
            ROUGE-L F-measure in [0, 1].
        """
        all_scores = self.rouge_scorer.score(reference, summary)
        return all_scores['rougeL'].fmeasure

    def calculate_recall_at_k(
        self,
        relevant_papers: List[str],
        retrieved_papers: List[str],
        k: int = 10
    ) -> float:
        """Return Recall@K.

        Args:
            relevant_papers: IDs of the ground-truth relevant papers.
            retrieved_papers: IDs of retrieved papers, best first.
            k: Cutoff rank.

        Returns:
            Fraction of the relevant papers found within the top k
            results, or 0.0 when the relevant set is empty.
        """
        relevant_set = set(relevant_papers)
        if not relevant_set:
            return 0.0
        top_k = retrieved_papers[:k]
        return self._hit_count(relevant_set, top_k) / len(relevant_set)

    def evaluate_retrieval(
        self,
        relevant_papers: List[str],
        retrieved_papers: List[str],
        k: int = 10
    ) -> Dict:
        """Evaluate retrieval with precision, recall and F1 at rank k.

        Args:
            relevant_papers: IDs of the ground-truth relevant papers.
            retrieved_papers: IDs of retrieved papers, best first.
            k: Cutoff rank.

        Returns:
            Dict with keys "precision@k", "recall@k", "f1@k" and "k".
        """
        precision = self.calculate_precision_at_k(relevant_papers, retrieved_papers, k)
        recall = self.calculate_recall_at_k(relevant_papers, retrieved_papers, k)

        # Harmonic mean; guard against the degenerate 0/0 case.
        denominator = precision + recall
        f1 = 2 * precision * recall / denominator if denominator > 0 else 0

        return {
            "precision@k": precision,
            "recall@k": recall,
            "f1@k": f1,
            "k": k
        }

    def evaluate_summary(
        self,
        references: List[str],
        summary: str
    ) -> Dict:
        """Score *summary* with ROUGE-L against each reference.

        Args:
            references: Reference (gold) summaries.
            summary: Generated summary to score.

        Returns:
            Dict with the mean ROUGE-L F1 ("rouge_l") and the list of
            per-reference scores ("rouge_scores").
        """
        rouge_scores = [self.calculate_rouge_l(ref, summary) for ref in references]
        avg_rouge_l = sum(rouge_scores) / len(rouge_scores) if rouge_scores else 0
        return {
            "rouge_l": avg_rouge_l,
            "rouge_scores": rouge_scores
        }


if __name__ == "__main__":
    # 测试评估指标
    evaluator = MetricsEvaluator()
    
    # 测试检索评估
    relevant = ["PMID1", "PMID2", "PMID3"]
    retrieved = ["PMID1", "PMID4", "PMID2", "PMID5", "PMID3"]
    
    retrieval_results = evaluator.evaluate_retrieval(relevant, retrieved, k=5)
    print("检索评估结果:", retrieval_results)
    
    # 测试摘要评估
    reference = "COVID-19 vaccines provide effective protection against severe disease."
    summary = "Vaccination against COVID-19 offers strong protection from severe illness."
    
    summary_results = evaluator.evaluate_summary([reference], summary)
    print("摘要评估结果:", summary_results)
