# -*- coding: utf-8 -*-
"""
检索评估指标
提供NDCG、MRR、MAP等标准检索评估指标
"""

import numpy as np
from typing import List, Dict, Any, Optional, Set
import math
from sklearn.preprocessing import MinMaxScaler
from loguru import logger


class RetrievalEvaluator:
    """
    Retrieval evaluator providing standard information-retrieval metrics.

    Includes NDCG, MRR, MAP, Precision@k, Recall@k and Hit Rate, plus
    helpers that turn raw model scores into relevance labels (adaptive
    thresholding, score normalization).  All methods are stateless.
    """

    @staticmethod
    def calculate_dcg(relevance_scores: List[float], k: Optional[int] = None) -> float:
        """
        Compute DCG (Discounted Cumulative Gain).

        Uses the exponential gain form gain = 2**rel - 1 discounted by
        log2(rank + 1), which emphasizes highly relevant documents.

        Args:
            relevance_scores: Relevance scores in the actual ranked order.
            k: Only consider the top-k results; None means all results.

        Returns:
            float: DCG value (0.0 for an empty list).
        """
        if not relevance_scores:
            return 0.0

        scores = relevance_scores[:k] if k is not None else relevance_scores

        # rank is 0-based here, so position = rank + 1 and the discount
        # denominator is log2(position + 1) == log2(rank + 2).
        return sum(
            (2 ** score - 1) / math.log2(rank + 2)
            for rank, score in enumerate(scores)
        )

    @staticmethod
    def calculate_ideal_dcg(relevance_scores: List[float], k: Optional[int] = None) -> float:
        """
        Compute the ideal DCG (IDCG), used as the NDCG normalizer.

        The ideal ranking is simply the scores sorted in descending order,
        so this delegates to :meth:`calculate_dcg` on the sorted list.

        Args:
            relevance_scores: Relevance scores (any order).
            k: Only consider the top-k results; None means all results.

        Returns:
            float: Ideal DCG value (0.0 for an empty list).
        """
        return RetrievalEvaluator.calculate_dcg(
            sorted(relevance_scores, reverse=True), k
        )

    @staticmethod
    def calculate_ndcg(relevance_scores: List[float], k: Optional[int] = None) -> float:
        """
        Compute NDCG (Normalized Discounted Cumulative Gain).

        Args:
            relevance_scores: Relevance scores in the actual ranked order.
            k: Only consider the top-k results; None means all results.

        Returns:
            float: NDCG@k in [0, 1]; 0.0 when the ideal DCG is 0
            (i.e. no relevant documents at all).
        """
        if not relevance_scores:
            return 0.0

        dcg = RetrievalEvaluator.calculate_dcg(relevance_scores, k)
        idcg = RetrievalEvaluator.calculate_ideal_dcg(relevance_scores, k)

        return dcg / idcg if idcg > 0 else 0.0

    @staticmethod
    def calculate_mrr(relevance_scores: List[float]) -> float:
        """
        Compute the reciprocal rank of the first relevant document
        (the per-query contribution to MRR, Mean Reciprocal Rank).

        Args:
            relevance_scores: Relevance scores in the actual ranked order.

        Returns:
            float: 1 / rank of the first score > 0, or 0.0 if none.
        """
        for rank, score in enumerate(relevance_scores, start=1):
            if score > 0:
                return 1.0 / rank
        return 0.0

    @staticmethod
    def calculate_average_precision(relevance_scores: List[float], k: Optional[int] = None) -> float:
        """
        Compute Average Precision (AP).

        Precision is accumulated at each rank holding a relevant document,
        then divided by the TOTAL number of relevant documents (including
        those beyond k), matching the standard AP@k definition.

        Args:
            relevance_scores: Relevance scores in the actual ranked order.
            k: Only consider the top-k results; None means all results.

        Returns:
            float: AP in [0, 1]; 0.0 when there are no relevant documents.
        """
        if not relevance_scores:
            return 0.0

        scores = relevance_scores[:k] if k is not None else relevance_scores

        # Normalizer counts relevant documents over the WHOLE list.
        total_relevant = sum(1 for score in relevance_scores if score > 0)
        if total_relevant == 0:
            return 0.0

        hits = 0
        precision_sum = 0.0
        for rank, score in enumerate(scores, start=1):
            if score > 0:
                hits += 1
                precision_sum += hits / rank

        return precision_sum / total_relevant

    @staticmethod
    def calculate_map(relevance_scores_list: List[List[float]], k: Optional[int] = None) -> float:
        """
        Compute MAP (Mean Average Precision) over multiple queries.

        Args:
            relevance_scores_list: One relevance-score list per query.
            k: Only consider the top-k results per query; None means all.

        Returns:
            float: Mean of the per-query AP values, in [0, 1].
        """
        if not relevance_scores_list:
            return 0.0

        ap_scores = [
            RetrievalEvaluator.calculate_average_precision(scores, k)
            for scores in relevance_scores_list
        ]
        return sum(ap_scores) / len(ap_scores)

    @staticmethod
    def calculate_precision(relevance_scores: List[float], k: Optional[int] = None) -> float:
        """
        Compute Precision@k.

        Args:
            relevance_scores: Relevance scores in the actual ranked order.
            k: Only consider the top-k results; None means all results.

        Returns:
            float: Fraction of the top-k documents that are relevant
            (score > 0), in [0, 1].
        """
        if not relevance_scores:
            return 0.0

        scores = relevance_scores[:k] if k is not None else relevance_scores
        if not scores:
            return 0.0

        relevant_count = sum(1 for score in scores if score > 0)
        return relevant_count / len(scores)

    @staticmethod
    def calculate_recall(relevance_scores: List[float], k: Optional[int] = None) -> float:
        """
        Compute Recall@k.

        Args:
            relevance_scores: Relevance scores in the actual ranked order.
            k: Only consider the top-k results; None means all results.

        Returns:
            float: Relevant documents in the top-k divided by the total
            number of relevant documents, in [0, 1].
        """
        if not relevance_scores:
            return 0.0

        total_relevant = sum(1 for score in relevance_scores if score > 0)
        if total_relevant == 0:
            return 0.0

        # list[:None] is the whole list, so k=None naturally means "all".
        relevant_in_top_k = sum(1 for score in relevance_scores[:k] if score > 0)
        return relevant_in_top_k / total_relevant

    @staticmethod
    def calculate_hit_rate(relevance_scores: List[float]) -> float:
        """
        Compute Hit Rate: 1.0 if any retrieved document is relevant
        (score > 0), else 0.0.

        Args:
            relevance_scores: Relevance scores in the actual ranked order.

        Returns:
            float: 1.0 or 0.0.
        """
        return 1.0 if any(score > 0 for score in relevance_scores) else 0.0

    @classmethod
    def evaluate_query(cls, retrieved_scores: List[float], k_values: List[int] = None) -> Dict[str, float]:
        """
        Evaluate a single query's retrieval results across several metrics.

        Args:
            retrieved_scores: Relevance scores of the retrieved documents,
                in ranked order.
            k_values: k cutoffs to evaluate; defaults to [1, 3, 5, 10].

        Returns:
            Dict[str, float]: Keys 'precision@k', 'recall@k', 'ndcg@k',
            'map@k' for each k, plus 'mrr' and 'hit_rate'.
        """
        if k_values is None:
            k_values = [1, 3, 5, 10]

        results = {}
        for k in k_values:
            results[f'precision@{k}'] = cls.calculate_precision(retrieved_scores, k)
            results[f'recall@{k}'] = cls.calculate_recall(retrieved_scores, k)
            results[f'ndcg@{k}'] = cls.calculate_ndcg(retrieved_scores, k)
            # Single-query MAP@k degenerates to AP@k.
            results[f'map@{k}'] = cls.calculate_average_precision(retrieved_scores, k)

        results['mrr'] = cls.calculate_mrr(retrieved_scores)
        results['hit_rate'] = cls.calculate_hit_rate(retrieved_scores)

        return results

    @staticmethod
    def adaptive_relevance_threshold(scores: List[float], method: str = 'percentile', percentile: float = 75.0) -> float:
        """
        Compute an adaptive relevance threshold over raw model scores.

        Args:
            scores: Raw scores returned by the model.
            method: Threshold strategy: 'percentile', 'mean', 'median',
                or 'otsu' (between-class variance maximization, suited to
                bimodal score distributions).
            percentile: Percentile used when method == 'percentile'.

        Returns:
            float: The computed threshold; falls back to 0.5 for empty
            input or an unknown method.
        """
        if not scores:
            return 0.5

        scores_array = np.asarray(scores, dtype=float)

        if method == 'percentile':
            return float(np.percentile(scores_array, percentile))
        elif method == 'mean':
            return float(np.mean(scores_array))
        elif method == 'median':
            return float(np.median(scores_array))
        elif method == 'otsu':
            # Otsu's method: pick the bin edge maximizing between-class
            # variance w0*w1*(mu0 - mu1)^2.
            hist, bins = np.histogram(scores_array, bins=50)
            total = scores_array.size

            max_var_between = 0.0
            best_threshold = 0.5

            for i in range(1, len(hist)):
                w0 = np.sum(hist[:i]) / total
                w1 = 1 - w0

                if w0 == 0 or w1 == 0:
                    continue

                upper = scores_array[scores_array > bins[i]]
                # BUG FIX: np.mean over an empty selection yields NaN (with
                # a RuntimeWarning) when the edge sits at/above the max.
                if upper.size == 0:
                    continue

                mu0 = np.mean(scores_array[scores_array <= bins[i]])
                mu1 = np.mean(upper)

                var_between = w0 * w1 * (mu0 - mu1) ** 2
                if var_between > max_var_between:
                    max_var_between = var_between
                    best_threshold = float(bins[i])

            return best_threshold
        else:
            return 0.5

    @staticmethod
    def convert_scores_to_relevance(model_scores: List[float],
                                  threshold_method: str = 'percentile',
                                  percentile: float = 75.0,
                                  manual_threshold: Optional[float] = None) -> List[float]:
        """
        Convert raw model scores into binary relevance labels.

        Args:
            model_scores: Raw scores returned by the model.
            threshold_method: Strategy passed to
                :meth:`adaptive_relevance_threshold`.
            percentile: Percentile for the 'percentile' strategy.
            manual_threshold: Explicit threshold; takes precedence over the
                adaptive computation when given.

        Returns:
            List[float]: 1.0 for scores >= threshold, else 0.0.
        """
        if not model_scores:
            return []

        if manual_threshold is not None:
            threshold = manual_threshold
        else:
            threshold = RetrievalEvaluator.adaptive_relevance_threshold(
                model_scores, threshold_method, percentile)

        return [1.0 if score >= threshold else 0.0 for score in model_scores]

    @staticmethod
    def normalize_scores(scores: List[float], method: str = 'minmax') -> List[float]:
        """
        Normalize raw scores into the [0, 1] range.

        Args:
            scores: Raw score list.
            method: 'minmax' (linear rescale), 'zscore' (standardize then
                squash through a sigmoid), or 'sigmoid' (sigmoid then
                min-max rescale).  Unknown methods return the input as-is.

        Returns:
            List[float]: Normalized scores; constant inputs map to all-0.0
            for 'minmax' (min-max convention) and all-0.5 otherwise.
        """
        if not scores:
            return []

        arr = np.asarray(scores, dtype=float)

        if method == 'minmax':
            # Plain (x - min) / (max - min); constant input -> zeros,
            # matching sklearn's MinMaxScaler without the dependency.
            span = arr.max() - arr.min()
            normalized = (arr - arr.min()) / span if span > 0 else np.zeros_like(arr)
        elif method == 'zscore':
            std = arr.std()
            if std > 0:
                z = (arr - arr.mean()) / std
                # Squash z-scores into (0, 1) with a logistic sigmoid.
                normalized = 1 / (1 + np.exp(-z))
            else:
                # BUG FIX: the original assigned a plain Python list here
                # and then crashed on the final .tolist() call.
                normalized = np.full(arr.shape, 0.5)
        elif method == 'sigmoid':
            normalized = 1 / (1 + np.exp(-arr))
            span = normalized.max() - normalized.min()
            # BUG FIX: guard against 0/0 -> NaN on constant input.
            if span > 0:
                normalized = (normalized - normalized.min()) / span
            else:
                normalized = np.full(arr.shape, 0.5)
        else:
            return scores

        return normalized.tolist()

    @staticmethod
    def scores_to_relevance_labels(scores: List[float],
                                 threshold_method: str = 'percentile',
                                 percentile: float = 75.0,
                                 manual_threshold: Optional[float] = None) -> List[float]:
        """
        Convert raw scores into graded relevance labels.

        Unlike :meth:`convert_scores_to_relevance` (binary 0/1), scores at
        or above the threshold keep a graded label score/threshold capped
        at 1.0, while scores below the threshold become 0.0.

        Args:
            scores: Raw score list.
            threshold_method: Strategy passed to
                :meth:`adaptive_relevance_threshold`.
            percentile: Percentile for the 'percentile' strategy.
            manual_threshold: Explicit threshold; takes precedence over the
                adaptive computation when given.

        Returns:
            List[float]: Relevance labels in [0, 1].
        """
        if not scores:
            return []

        if manual_threshold is not None:
            threshold = manual_threshold
        else:
            threshold = RetrievalEvaluator.adaptive_relevance_threshold(
                scores, method=threshold_method, percentile=percentile
            )

        return [
            min(score / threshold, 1.0) if score >= threshold else 0.0
            for score in scores
        ]