"""
检索结果处理器
负责去重、重排序等后处理操作
"""

import logging
from typing import List, Dict, Any, Optional
from llama_index.core.schema import NodeWithScore

from ..utils.logger import setup_logger
from ..utils.text_utils import calculate_similarity, remove_duplicates_by_similarity


class ResultProcessor:
    """Post-processor for retrieval results.

    Deduplicates, reranks, filters, and formats lists of
    ``NodeWithScore`` objects returned by a retriever. All public
    methods are best-effort: on unexpected errors they log and fall
    back to returning their input unchanged (or an empty list for
    pure formatting), never raising to the caller.
    """

    def __init__(self, similarity_threshold: float = 0.8):
        """
        Initialize the result processor.

        Args:
            similarity_threshold: Similarity threshold in [0, 1]; two
                texts at or above this similarity are treated as
                duplicates during deduplication.
        """
        self.similarity_threshold = similarity_threshold
        self.logger = setup_logger(__name__)

    def deduplicate_by_content(self, nodes: List[NodeWithScore]) -> List[NodeWithScore]:
        """
        Remove near-duplicate nodes based on content similarity.

        Order-preserving: the first node of each group of near-duplicate
        texts is kept. Complexity is O(n^2) pairwise similarity checks in
        the worst case.

        Args:
            nodes: Candidate nodes.

        Returns:
            Nodes with exact and near-duplicates removed; on internal
            failure, the original list unchanged.
        """
        if not nodes:
            return []

        try:
            # NOTE: the previous implementation also ran
            # remove_duplicates_by_similarity() over all texts up front and
            # discarded the result — pure dead work, removed here.
            unique_nodes: List[NodeWithScore] = []
            seen_texts: set = set()

            for node in nodes:
                node_text = node.node.text
                # Fast path: exact duplicate of an already-kept text.
                if node_text in seen_texts:
                    continue

                # Near-duplicate check against every retained text.
                is_duplicate = any(
                    calculate_similarity(node_text, seen_text) >= self.similarity_threshold
                    for seen_text in seen_texts
                )
                if not is_duplicate:
                    unique_nodes.append(node)
                    seen_texts.add(node_text)

            self.logger.info(f"去重完成: {len(nodes)} -> {len(unique_nodes)}")
            return unique_nodes

        except Exception as e:
            # Best-effort: deduplication failure must not lose results.
            self.logger.error(f"去重失败: {e}")
            return nodes

    def rerank_by_relevance(self, nodes: List[NodeWithScore], query: str) -> List[NodeWithScore]:
        """
        Rerank nodes by a weighted blend of relevance signals.

        final = 0.6 * retrieval score + 0.3 * text similarity
                + 0.1 * keyword overlap.

        Args:
            nodes: Nodes to rerank.
            query: Query text the scores are computed against.

        Returns:
            New ``NodeWithScore`` objects (original nodes wrapped with
            updated scores) sorted by descending score; the input list
            unchanged if it or the query is empty, or on internal failure.
        """
        if not nodes or not query:
            return nodes

        try:
            # Hoist query-side work out of the per-node loop (invariant).
            query_lower = query.lower()
            query_words = set(query_lower.split())

            scored_nodes = []
            for node in nodes:
                # NodeWithScore.score is Optional; treat missing as 0.0
                # instead of letting None * 0.6 raise into the except.
                base_score = node.score if node.score is not None else 0.0

                text_lower = node.node.text.lower()

                # Whole-text similarity signal.
                text_similarity = calculate_similarity(query_lower, text_lower)

                # Fraction of query words that appear in the node text.
                text_words = set(text_lower.split())
                keyword_overlap = (
                    len(query_words.intersection(text_words)) / max(len(query_words), 1)
                )

                # Weighted combination: vector score dominates, text
                # similarity and keyword overlap refine the ordering.
                final_score = (
                    base_score * 0.6
                    + text_similarity * 0.3
                    + keyword_overlap * 0.1
                )

                # Wrap the original node so its payload/metadata survive.
                scored_nodes.append(NodeWithScore(node=node.node, score=final_score))

            scored_nodes.sort(key=lambda x: x.score, reverse=True)

            self.logger.info(f"重排序完成，共 {len(scored_nodes)} 个结果")
            return scored_nodes

        except Exception as e:
            self.logger.error(f"重排序失败: {e}")
            return nodes

    def filter_by_score(self, nodes: List[NodeWithScore], min_score: float = 0.5) -> List[NodeWithScore]:
        """
        Drop nodes whose score is below a threshold.

        Nodes with a ``None`` score are dropped as well (previously a
        ``None >= float`` TypeError silently disabled filtering).

        Args:
            nodes: Nodes to filter.
            min_score: Minimum score (inclusive) a node must have to be kept.

        Returns:
            Filtered list; the original list on internal failure.
        """
        if not nodes:
            return []

        try:
            filtered_nodes = [
                node for node in nodes
                if node.score is not None and node.score >= min_score
            ]

            self.logger.info(f"分数过滤完成: {len(nodes)} -> {len(filtered_nodes)}")
            return filtered_nodes

        except Exception as e:
            self.logger.error(f"分数过滤失败: {e}")
            return nodes

    def process_results(
        self,
        nodes: List[NodeWithScore],
        query: str,
        deduplicate: bool = True,
        rerank: bool = True,
        filter_score: bool = True,
        min_score: float = 0.5
    ) -> List[NodeWithScore]:
        """
        Run the full post-processing pipeline over retrieval results.

        Stages run in order: deduplicate -> rerank -> score filter, each
        individually toggleable.

        Args:
            nodes: Raw retrieved nodes.
            query: Query text (used by the rerank stage).
            deduplicate: Whether to run content deduplication.
            rerank: Whether to rerank by relevance to ``query``.
            filter_score: Whether to drop low-scoring nodes.
            min_score: Threshold used by the score filter.

        Returns:
            Processed nodes; the original list on internal failure.
        """
        try:
            self.logger.info(f"开始处理 {len(nodes)} 个检索结果")

            # Copy so stages never mutate the caller's list.
            processed_nodes = nodes.copy()

            if deduplicate:
                processed_nodes = self.deduplicate_by_content(processed_nodes)

            if rerank:
                processed_nodes = self.rerank_by_relevance(processed_nodes, query)

            if filter_score:
                processed_nodes = self.filter_by_score(processed_nodes, min_score)

            self.logger.info(f"结果处理完成，返回 {len(processed_nodes)} 个结果")
            return processed_nodes

        except Exception as e:
            self.logger.error(f"处理检索结果失败: {e}")
            return nodes

    def format_results(self, nodes: List[NodeWithScore]) -> List[Dict[str, Any]]:
        """
        Convert nodes into plain, display-friendly dictionaries.

        Each entry carries a 1-based rank, the score rounded to 4 places
        (``None`` passed through — previously ``round(None, 4)`` raised
        and the whole batch was dropped), a 200-character text preview,
        the node metadata, and the node id.

        Args:
            nodes: Nodes to format (assumed already sorted/processed).

        Returns:
            List of result dicts; an empty list on internal failure.
        """
        try:
            formatted_results = []

            for i, node in enumerate(nodes, 1):
                text = node.node.text
                result = {
                    "rank": i,
                    "score": round(node.score, 4) if node.score is not None else None,
                    # Truncate long texts for display; ellipsis marks the cut.
                    "text": text[:200] + "..." if len(text) > 200 else text,
                    "metadata": node.node.metadata,
                    "node_id": node.node.node_id
                }
                formatted_results.append(result)

            return formatted_results

        except Exception as e:
            self.logger.error(f"格式化结果失败: {e}")
            return []
