"""
检索工具
封装检索接口，为 Agent 提供检索能力
"""

import logging
from typing import List, Dict, Any, Optional
from llama_index.core import VectorStoreIndex

from .hybrid_retriever import HybridRetriever
from .result_processor import ResultProcessor
from ..models.llm_config import LLMConfig
from ..utils.logger import setup_logger


class RetrievalTool:
    """Retrieval tool: wraps the retrieval pipeline to give an Agent search capability."""

    def __init__(self, index: VectorStoreIndex, config: LLMConfig):
        """
        Initialize the retrieval tool.

        Args:
            index: Vector store index to search against.
            config: Configuration object with retrieval/chunking settings.
        """
        self.index = index
        self.config = config
        self.logger = setup_logger(__name__)

        # Wire up the retrieval pipeline components.
        self.hybrid_retriever = HybridRetriever(index, config)
        self.result_processor = ResultProcessor()

        self.logger.info("检索工具初始化完成")

    def search(
        self,
        query: str,
        search_type: str = "hybrid",
        top_k: Optional[int] = None,
        deduplicate: bool = True,
        rerank: bool = True,
        min_score: float = 0.1
    ) -> List[Dict[str, Any]]:
        """
        Run a retrieval query.

        Args:
            query: Query text.
            search_type: Retrieval type ("vector", "keyword", "hybrid").
            top_k: Optional cap on the number of results; None means no limit.
            deduplicate: Whether to drop duplicate results.
            rerank: Whether to rerank results.
            min_score: Minimum score threshold for a result to be kept.

        Returns:
            List of formatted result dicts; empty list when nothing is
            found or on any internal failure (best-effort contract).
        """
        try:
            self.logger.info(f"开始检索: {query} (类型: {search_type})")

            # Run the underlying retriever.
            nodes = self.hybrid_retriever.search(query, search_type)

            if not nodes:
                self.logger.warning("未找到相关结果")
                return []

            # Post-process: dedupe / rerank / score-filter.
            processed_nodes = self.result_processor.process_results(
                nodes=nodes,
                query=query,
                deduplicate=deduplicate,
                rerank=rerank,
                filter_score=True,
                min_score=min_score
            )

            # BUGFIX: explicit None check so top_k=0 means "return nothing"
            # rather than being treated as "no limit" by truthiness.
            if top_k is not None:
                processed_nodes = processed_nodes[:top_k]

            formatted_results = self.result_processor.format_results(processed_nodes)

            self.logger.info(f"检索完成，返回 {len(formatted_results)} 个结果")
            return formatted_results

        except Exception as e:
            # Best-effort: log with full traceback and degrade to empty results.
            self.logger.exception(f"检索失败: {e}")
            return []

    def get_context(self, query: str, max_length: int = 2000) -> str:
        """
        Retrieve and assemble a context string for the query.

        Args:
            query: Query text.
            max_length: Maximum total context length in characters.

        Returns:
            Context text built from the top results, joined by blank lines;
            empty string when nothing is found or on failure.
        """
        try:
            results = self.search(query, top_k=5, min_score=0.1)

            if not results:
                return ""

            # Accumulate result texts until the length budget is exhausted;
            # the last result that does not fully fit is truncated.
            # NOTE: the budget counts raw text only, not the "\n\n" separators.
            context_parts = []
            current_length = 0

            for result in results:
                text = result["text"]
                if current_length + len(text) <= max_length:
                    context_parts.append(text)
                    current_length += len(text)
                else:
                    remaining_length = max_length - current_length
                    if remaining_length > 0:
                        context_parts.append(text[:remaining_length])
                    break

            context = "\n\n".join(context_parts)
            self.logger.info(f"生成上下文，长度: {len(context)}")

            return context

        except Exception as e:
            # Best-effort: log with traceback and return an empty context.
            self.logger.exception(f"获取上下文失败: {e}")
            return ""

    def search_with_metadata(
        self,
        query: str,
        metadata_filter: Optional[Dict[str, Any]] = None,
        **kwargs
    ) -> List[Dict[str, Any]]:
        """
        Retrieval with exact-match metadata filtering.

        Args:
            query: Query text.
            metadata_filter: Mapping of metadata key -> required value;
                a result is kept only if every key is present and equal.
            **kwargs: Extra keyword arguments forwarded to ``search``.

        Returns:
            Filtered result list; empty list on failure.
        """
        try:
            results = self.search(query, **kwargs)

            if not metadata_filter:
                return results

            # Keep a result only when every filter key exists AND matches.
            # Deliberately not metadata.get(key) == value: a missing key must
            # not match a filter value of None.
            filtered_results = [
                result
                for result in results
                if all(
                    key in result.get("metadata", {})
                    and result.get("metadata", {})[key] == value
                    for key, value in metadata_filter.items()
                )
            ]

            self.logger.info(f"元数据过滤完成: {len(results)} -> {len(filtered_results)}")
            return filtered_results

        except Exception as e:
            # Best-effort: log with traceback and return an empty result set.
            self.logger.exception(f"带元数据过滤的检索失败: {e}")
            return []

    def generate_answer(self, query: str, max_context_length: int = 2000) -> str:
        """
        Generate an answer from retrieved context using the configured LLM.

        Args:
            query: User query.
            max_context_length: Maximum context length in characters.

        Returns:
            Generated answer text; a Chinese apology message when no
            context is found or when generation fails.
        """
        try:
            self.logger.info(f"开始生成答案: {query}")

            context = self.get_context(query, max_context_length)

            if not context:
                return "抱歉，我没有找到相关的信息来回答您的问题。"

            # Build the prompt (kept in Chinese — it is user-facing model input).
            prompt = f"""基于以下检索到的信息，请回答用户的问题。

检索到的信息：
{context}

用户问题：{query}

请基于上述信息给出详细、准确的回答。如果信息不足以回答问题，请说明。回答要简洁明了，重点突出。"""

            # Local import keeps module import time low and avoids a hard
            # dependency at class-definition time.
            from llama_index.core import Settings
            response = Settings.llm.complete(prompt)

            answer = response.text.strip()
            self.logger.info(f"答案生成完成，长度: {len(answer)}")

            return answer

        except Exception as e:
            # Best-effort: log with traceback and surface the error to the user.
            self.logger.exception(f"生成答案失败: {e}")
            return f"抱歉，生成答案时出现错误: {str(e)}"

    def get_statistics(self) -> Dict[str, Any]:
        """
        Report retrieval configuration statistics.

        Returns:
            Dict of the key retrieval/chunking settings; empty dict if the
            config object lacks any expected attribute.
        """
        try:
            return {
                "collection_name": self.config.collection_name,
                "similarity_top_k": self.config.similarity_top_k,
                "keyword_top_k": self.config.keyword_top_k,
                "chunk_size": self.config.chunk_size,
                "chunk_overlap": self.config.chunk_overlap
            }

        except Exception as e:
            # Best-effort: log with traceback and return an empty dict.
            self.logger.exception(f"获取统计信息失败: {e}")
            return {}
