"""
Query engine core module.

Provides the core functionality of the RAG query engine.
"""

from typing import List, Optional, Dict, Any, Union, Generator
from dataclasses import dataclass
import time

from llama_index.core import VectorStoreIndex
from llama_index.core.schema import NodeWithScore, QueryBundle, QueryType
from llama_index.core.query_engine import BaseQueryEngine
from llama_index.core.response import Response
from llama_index.core.base.response.schema import StreamingResponse

# Try to import BaseNodePostprocessor, falling back through known locations on ImportError.
try:
    from llama_index.core.postprocessor import BaseNodePostprocessor
except ImportError:
    # In newer LlamaIndex versions this class may live in a different module.
    try:
        from llama_index.core.schema import BaseNodePostprocessor
    except ImportError:
        # If neither location provides it, define a minimal base class.
        from abc import ABC
        class BaseNodePostprocessor(ABC):
            pass

from ..data_storage import index_manager
from ..generation import llm_manager
from .strategies import RetrievalStrategy, RetrievalConfig, RetrievalMode
from .retrievers import CustomRetriever, MultiRetriever, AdaptiveRetriever


@dataclass
class QueryResult:
    """Result of one RAG query: the answer, its sources, and a timing breakdown."""
    query: str  # original query text
    answer: str  # generated answer text
    sources: List[Dict[str, Any]]  # per-source summaries (index, score, text snippet, metadata, file_path)
    response_metadata: Dict[str, Any]  # metadata attached to the underlying response
    retrieval_time: float  # seconds spent retrieving (0 when not measured separately)
    generation_time: float  # seconds spent generating the answer
    total_time: float  # total wall-clock seconds for the query


@dataclass
class RetrievalStats:
    """Aggregate query statistics: counts, running-average timings, cache hit/miss totals."""
    total_queries: int = 0
    successful_queries: int = 0
    failed_queries: int = 0
    avg_retrieval_time: float = 0
    avg_generation_time: float = 0
    avg_total_time: float = 0
    cache_hits: int = 0
    cache_misses: int = 0

    def update(self, result: QueryResult, success: bool = True) -> None:
        """Record one finished query and fold its timings into the running averages."""
        self.total_queries += 1
        if success:
            self.successful_queries += 1
        else:
            self.failed_queries += 1

        # Fold each timing from the result into its corresponding running mean.
        for attr, elapsed in (
            ('avg_retrieval_time', result.retrieval_time),
            ('avg_generation_time', result.generation_time),
            ('avg_total_time', result.total_time),
        ):
            self._update_avg_time(attr, elapsed)

    def _update_avg_time(self, field: str, new_time: float) -> None:
        """Merge one sample into the incremental mean stored in *field*."""
        n = self.total_queries
        if n == 1:
            # First sample: the mean is the sample itself.
            updated = new_time
        else:
            # Incremental mean: scale the old mean back up, add the new sample.
            updated = (getattr(self, field) * (n - 1) + new_time) / n
        setattr(self, field, updated)


class RAGQueryEngine:
    """
    RAG query engine - core implementation.

    Provides the complete RAG pipeline: retrieval-augmented generation,
    multiple retrieval strategies, streaming queries with a non-streaming
    fallback, response caching, and performance statistics.
    """

    def __init__(
        self,
        index: Optional[VectorStoreIndex] = None,
        strategy: Optional[RetrievalStrategy] = None,
        config: Optional[RetrievalConfig] = None
    ):
        """
        Initialize the RAG query engine.

        Args:
            index: Vector index instance; when omitted, ``index_manager.index``
                is used lazily at first query time.
            strategy: Retrieval strategy; a STANDARD default is created if omitted.
            config: Retrieval configuration; defaults to ``RetrievalConfig()``.
        """
        self._index = index
        self._strategy = strategy
        self._config = config or RetrievalConfig()
        self._query_engine: Optional[BaseQueryEngine] = None
        self._retriever: Optional[CustomRetriever] = None

        # Performance statistics and a simple in-process response cache.
        self.stats = RetrievalStats()
        self.query_cache: Dict[str, Any] = {}
        self.enable_cache = True

        # Engine construction is deferred until first use (lazy initialization).

    def _initialize_engine(self) -> None:
        """Build the retriever and query engine, resolving defaults lazily.

        Raises:
            RuntimeError: if no index was supplied and the shared index
                manager has no loaded index either.
        """
        # Fall back to the shared index manager's index when none was given.
        if self._index is None:
            self._index = index_manager.index

        if self._index is None:
            raise RuntimeError("索引未加载，请先创建或加载索引")

        # Ensure the LLM is wired into LlamaIndex before building engines.
        llm_manager.configure_llama_index()

        # Build the retriever from the configured strategy, or a default one.
        if self._strategy:
            self._retriever = CustomRetriever(self._index, self._strategy)
        else:
            from .strategies import RetrievalStrategyFactory, RetrievalMode
            default_strategy = RetrievalStrategyFactory.create_strategy(RetrievalMode.STANDARD)
            self._retriever = CustomRetriever(self._index, default_strategy)

        self._query_engine = self._create_query_engine()

        print("RAG查询引擎初始化完成")

    def _create_query_engine(self, streaming: bool = False) -> BaseQueryEngine:
        """Create a RetrieverQueryEngine bound to the current retriever/config.

        Args:
            streaming: whether the response synthesizer should stream tokens.
        """
        from llama_index.core.query_engine import RetrieverQueryEngine
        from llama_index.core import get_response_synthesizer

        synthesizer = get_response_synthesizer(
            response_mode=self._config.response_mode.value,
            streaming=streaming
        )

        return RetrieverQueryEngine(
            retriever=self._retriever,
            response_synthesizer=synthesizer,
            node_postprocessors=self._create_postprocessors()
        )

    def _create_postprocessors(self) -> List[BaseNodePostprocessor]:
        """Instantiate the node post-processors named in the configuration."""
        postprocessors: List[BaseNodePostprocessor] = []

        if self._config.node_postprocessors:
            # NOTE(review): depending on the installed LlamaIndex version this
            # class may be named KeywordNodePostprocessor — confirm the import.
            from llama_index.core.postprocessor import SimilarityPostprocessor, KeywordPostprocessor

            for processor_name in self._config.node_postprocessors:
                if processor_name == "SimilarityPostprocessor":
                    postprocessors.append(SimilarityPostprocessor(similarity_cutoff=0.7))
                elif processor_name == "KeywordPostprocessor":
                    postprocessors.append(KeywordPostprocessor(required_keywords=[]))

        return postprocessors

    @staticmethod
    def _make_cache_key(query_text: str, kwargs: Dict[str, Any]) -> str:
        """Build a cache key covering the query text AND any extra kwargs.

        Keying on the text alone (the previous ``hash(query_text)``) let two
        calls with identical text but different kwargs share one cached
        response; ``repr`` keeps the key hashable for arbitrary kwarg values.
        """
        if not kwargs:
            return query_text
        return f"{query_text}|{sorted(kwargs.items())!r}"

    @staticmethod
    def _build_sources(response: Any) -> List[Dict[str, Any]]:
        """Summarize a response's source nodes for display/serialization.

        Each entry carries a 1-based index, the similarity score, a text
        snippet capped at 200 characters, the node metadata, and the source
        file name (under the historical key ``file_path``). Missing
        ``source_nodes``/``score``/``metadata`` attributes are tolerated.
        """
        sources: List[Dict[str, Any]] = []
        for i, node in enumerate(getattr(response, 'source_nodes', None) or []):
            metadata = getattr(node, 'metadata', None) or {}
            text = node.text
            sources.append({
                'index': i + 1,
                'score': getattr(node, 'score', None),
                'text': text[:200] + "..." if len(text) > 200 else text,
                'metadata': metadata,
                'file_path': metadata.get('file_name') if metadata else None
            })
        return sources

    def query(self, query_text: str, **kwargs) -> Response:
        """Execute a (non-streaming) query, serving from the cache when possible.

        Args:
            query_text: the user's question.
            **kwargs: forwarded to the underlying query engine (and included
                in the cache key).

        Raises:
            Exception: any engine failure is recorded in the stats and re-raised.
        """
        if self._query_engine is None:
            self._initialize_engine()

        cache_key = self._make_cache_key(query_text, kwargs)
        if self.enable_cache and cache_key in self.query_cache:
            self.stats.cache_hits += 1
            print(f"从缓存返回查询结果: {query_text}")
            return self.query_cache[cache_key]

        self.stats.cache_misses += 1
        print(f"正在执行查询: {query_text}")

        start_time = time.time()

        try:
            response = self._query_engine.query(query_text, **kwargs)
            total_time = time.time() - start_time
            print(f"查询执行完成，耗时: {total_time:.2f}秒")

            if self.enable_cache:
                self.query_cache[cache_key] = response

            return response

        except Exception as e:
            print(f"查询执行失败: {e}")
            # Record the failure; successful calls are counted by higher-level
            # helpers (query_with_details / query_stream) to avoid double counting.
            self.stats.update(QueryResult(
                query=query_text,
                answer=f"查询失败: {str(e)}",
                sources=[],
                response_metadata={'error': str(e)},
                retrieval_time=0,
                generation_time=0,
                total_time=time.time() - start_time
            ), success=False)
            raise

    def query_stream(self, query_text: str) -> Generator[Union[str, Dict[str, Any]], None, None]:
        """
        Execute a streaming query.

        Yields:
            str: answer tokens (or the whole answer when streaming is unavailable).
            dict: a final ``{"__type__": "sources", "data": [...]}`` record.
        """
        if self._retriever is None:
            self._initialize_engine()

        # Build a temporary streaming engine for this request only.
        streaming_engine = self._create_query_engine(streaming=True)

        start_time = time.time()
        try:
            response = streaming_engine.query(query_text)
            final_response_txt = ""

            is_streaming = isinstance(response, StreamingResponse)
            resp_gen = getattr(response, "response_gen", None) if is_streaming else None

            if not is_streaming or resp_gen is None:
                # Non-streaming response (or no generator): emit it whole.
                text = "" if response is None else str(response)
                final_response_txt = text
                if text:
                    yield text
            else:
                # Stream token chunks as they arrive, accumulating the full answer.
                for text in resp_gen:
                    if text is None:
                        continue
                    chunk = str(text)
                    final_response_txt += chunk
                    yield chunk

            total_time = time.time() - start_time
            sources = self._build_sources(response)

            result = QueryResult(
                query=query_text,
                answer=final_response_txt,
                sources=sources,
                response_metadata=getattr(response, 'metadata', {}),
                retrieval_time=0,
                generation_time=total_time,
                total_time=total_time
            )
            self.stats.update(result)

            # Emit the source information last.
            yield {"__type__": "sources", "data": sources}

        except Exception as e:
            # Swallow streaming failures from the underlying LLM and fall back
            # to a non-streaming answer so the frontend does not error out.
            print(f"流式查询失败，回退非流式: {e}")
            try:
                fallback_response = self.query(query_text)
                text = "" if fallback_response is None else str(fallback_response)
                if text:
                    yield text
                sources = self._build_sources(fallback_response)
                total_time = time.time() - start_time
                result = QueryResult(
                    query=query_text,
                    answer=text,
                    sources=sources,
                    response_metadata=getattr(fallback_response, 'metadata', {}),
                    retrieval_time=0,
                    generation_time=total_time,
                    total_time=total_time
                )
                self.stats.update(result)
                yield {"__type__": "sources", "data": sources}
            except Exception as ee:
                print(f"回退非流式也失败: {ee}")
                yield f"[ERROR]LLM流式API调用失败: {e}"

    def query_with_details(self, query_text: str) -> QueryResult:
        """Execute a query and return a QueryResult with sources and timings."""
        start_time = time.time()
        response = self.query(query_text)

        total_time = time.time() - start_time
        retrieval_time = 0.0
        if hasattr(response, "metadata"):
            retrieval_time = float(getattr(response, "metadata", {}).get("retrieval_time", 0.0) or 0.0)
        if retrieval_time <= 0:
            # Without an explicit retrieval timing, attribute all time to retrieval.
            retrieval_time = total_time
        generation_time = max(total_time - retrieval_time, 0.0)

        result = QueryResult(
            query=query_text,
            answer=str(response),
            sources=self._build_sources(response),
            response_metadata=getattr(response, 'metadata', {}),
            retrieval_time=retrieval_time,
            generation_time=generation_time,
            total_time=total_time
        )
        self.stats.update(result)
        return result

    def similarity_search(self, query_text: str, top_k: int = 5) -> List[NodeWithScore]:
        """Retrieve the top_k most similar nodes without LLM generation."""
        if self._retriever is None:
            self._initialize_engine()
        # Temporarily override top_k on the shared config; always restore it.
        original_top_k = self._config.similarity_top_k
        self._config.similarity_top_k = top_k
        try:
            return self._retriever.retrieve(QueryBundle(query_str=query_text))
        finally:
            self._config.similarity_top_k = original_top_k

    def update_strategy(self, strategy: RetrievalStrategy) -> None:
        """Switch the retrieval strategy (and its config), rebuilding the engine."""
        self._strategy = strategy
        self._config = strategy.config
        if self._index:
            self._retriever = CustomRetriever(self._index, strategy)
            self._query_engine = self._create_query_engine()
        print(f"检索策略已更新为: {type(strategy).__name__}")

    def update_config(self, config: RetrievalConfig) -> None:
        """Replace the retrieval configuration and rebuild the query engine."""
        self._config = config
        if self._index:
            self._query_engine = self._create_query_engine()
        print("检索配置已更新")

    def update_index(self, new_index: VectorStoreIndex) -> None:
        """Point the engine at a new index, resetting cache and statistics."""
        self._index = new_index
        self.clear_cache()
        self.stats = RetrievalStats()
        self._initialize_engine()
        print("查询引擎索引已更新")

    def add_postprocessor(self, postprocessor: BaseNodePostprocessor) -> None:
        """Attach a node post-processor and rebuild the query engine."""
        if self._retriever:
            self._retriever.add_postprocessor(postprocessor)
            self._query_engine = self._create_query_engine()

    def remove_postprocessor(self, postprocessor_class: type) -> None:
        """Remove post-processors of the given class and rebuild the engine."""
        if self._retriever:
            self._retriever.remove_postprocessor(postprocessor_class)
            self._query_engine = self._create_query_engine()

    def get_engine_info(self) -> Dict[str, Any]:
        """Return a snapshot of engine state, configuration, and statistics."""
        total = self.stats.total_queries
        lookups = self.stats.cache_hits + self.stats.cache_misses
        return {
            'is_initialized': self._query_engine is not None,
            'has_index': self._index is not None,
            'strategy': type(self._strategy).__name__ if self._strategy else None,
            'config': self._config.to_dict(),
            'llm_model': llm_manager.model_info,
            'index_info': index_manager.get_index_info() if index_manager.index else None,
            'cache_enabled': self.enable_cache,
            'cache_size': len(self.query_cache),
            'stats': {
                'total_queries': total,
                'success_rate': self.stats.successful_queries / total if total > 0 else 0,
                'avg_response_time': self.stats.avg_total_time,
                'cache_hit_rate': self.stats.cache_hits / lookups if lookups > 0 else 0
            }
        }

    def clear_cache(self) -> None:
        """Drop all cached responses."""
        self.query_cache.clear()
        print("查询缓存已清空")

    def enable_query_cache(self, enabled: bool = True) -> None:
        """Enable or disable the response cache; disabling also clears it."""
        self.enable_cache = enabled
        if not enabled:
            self.clear_cache()

    def export_stats(self) -> Dict[str, Any]:
        """Export retrieval, cache, and engine statistics as a plain dict."""
        total = self.stats.total_queries
        lookups = self.stats.cache_hits + self.stats.cache_misses
        return {
            'retrieval_stats': {
                'total_queries': total,
                'successful_queries': self.stats.successful_queries,
                'failed_queries': self.stats.failed_queries,
                'success_rate': self.stats.successful_queries / total if total > 0 else 0,
                'avg_retrieval_time': self.stats.avg_retrieval_time,
                'avg_generation_time': self.stats.avg_generation_time,
                'avg_total_time': self.stats.avg_total_time
            },
            'cache_stats': {
                'enabled': self.enable_cache,
                'size': len(self.query_cache),
                'hits': self.stats.cache_hits,
                'misses': self.stats.cache_misses,
                'hit_rate': self.stats.cache_hits / lookups if lookups > 0 else 0
            },
            'engine_info': self.get_engine_info()
        }

    @property
    def engine(self) -> BaseQueryEngine:
        """The underlying query engine, initialized on first access."""
        if self._query_engine is None:
            self._initialize_engine()
        return self._query_engine

    @property
    def retriever(self) -> CustomRetriever:
        """The underlying retriever, initialized on first access."""
        if self._retriever is None:
            self._initialize_engine()
        return self._retriever

    def __str__(self) -> str:
        """Return a human-readable summary of the engine state."""
        info = self.get_engine_info()
        return f"""
=== RAG查询引擎信息 ===
初始化状态: {'已初始化' if info['is_initialized'] else '未初始化'}
索引状态: {'可用' if info['has_index'] else '不可用'}
检索策略: {info['strategy'] or '默认'}
LLM模型: {info['llm_model']['model_name']}
缓存状态: {'启用' if info['cache_enabled'] else '禁用'} ({info['cache_size']} 条)
查询统计: {info['stats']['total_queries']} 次, 成功率 {info['stats']['success_rate']:.1%}
        """.strip()
