import uuid
from typing import Dict, Any, Optional, List

from llama_index.core import QueryBundle
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.llms.types import ChatMessage, TextBlock
from llama_index.core.chat_engine import CondensePlusContextChatEngine, SimpleChatEngine
from llama_index.core.chat_engine.types import AgentChatResponse
from llama_index.core.indices.property_graph import LLMSynonymRetriever
from llama_index.core.memory import Memory, StaticMemoryBlock, FactExtractionMemoryBlock, VectorMemoryBlock
from llama_index.core.postprocessor import LongContextReorder
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.retrievers.fusion_retriever import FUSION_MODES
from llama_index.core.schema import NodeWithScore, TextNode
from llama_index.core.vector_stores import MetadataFilters, MetadataFilter, FilterOperator
from loguru import logger

from ai_platform.config.resource import get_property_graph_index, get_llm, get_tree_index, get_cross_encoder_reranker, get_llm_reranker, get_embedding


class EmptyRetriever(BaseRetriever):
    """Fallback retriever used when no knowledge base is selected.

    Despite retrieving nothing, it does not return an empty list: it yields a
    single zero-score placeholder node so downstream components (fusion,
    rerankers, the chat engine) always receive at least one node to work with.
    """

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        # Use the module-wide loguru logger instead of print, consistent with
        # the rest of this file.
        logger.info("调用了 EmptyRetriever，将返回空结果。")
        # NOTE: the original passed `id=` / `meta_data=`, which are not the
        # TextNode field names (`id_` / `metadata`) and were silently ignored.
        return [
            NodeWithScore(
                node=TextNode(text="没有提供任何信息", id_=str(uuid.uuid4()), metadata={}),
                score=0.0,
            )
        ]

class RAGQueryEngine:
    """RAG (retrieval-augmented generation) query engine.

    Fuses vector, tree, property-graph and LLM-synonym retrievers with
    reciprocal-rank fusion, re-ranks the fused nodes, and answers through a
    condense-plus-context chat engine backed by persistent per-session memory.
    """

    def __init__(self, session_id: str, reranker=None):
        """Initialize the engine for one chat session.

        Args:
            session_id: Unique chat-session identifier; also the key under
                which chat history is persisted.
            reranker: Optional external reranker. NOTE(review): it is only
                reported in query metadata — actual post-processing uses
                ``get_llm_reranker()``; confirm this is intended.
        """
        self.session_id = session_id
        self.reranker = reranker
        # Imported lazily (function scope) to avoid circular imports at
        # module load time.
        from ..config.resource import get_llm, get_vector_index, get_db_engine, get_property_graph_index
        self._llm = get_llm()
        self._index = get_vector_index()
        self._tree_index = get_tree_index()
        self._graph_index = get_property_graph_index()
        # Persistent, token-bounded chat memory keyed by session_id.
        # Optional memory blocks (StaticMemoryBlock / FactExtractionMemoryBlock /
        # VectorMemoryBlock) can be plugged in via the `memory_blocks=` argument.
        self.memory = Memory.from_defaults(
            session_id=session_id,
            token_limit=3000,
            token_flush_size=500,
            table_name="chat_history",
            chat_history_token_ratio=0.5,
            async_engine=get_db_engine(),
        )

        logger.info(f"RAG查询引擎初始化成功 - session_id: {session_id}")

    def create_engine(self, knowledge_base_ids: List[str] | None = None, index_type: str = "vector"):
        """Build a chat engine scoped to the given knowledge bases.

        Args:
            knowledge_base_ids: Knowledge bases to search. When falsy, the
                index retrievers are replaced by ``EmptyRetriever`` so the
                model answers without retrieved context.
            index_type: Reserved for selecting a retrieval strategy; currently
                unused — all index types are always fused together.

        Returns:
            A ``CondensePlusContextChatEngine`` that condenses follow-up
            questions against session history before retrieving.
        """
        # Restrict retrieval to the requested knowledge bases, e.g.
        # knowledge_base_id == "20e12a6d-859d-4cb3-9ebc-699fd4aa8278".
        filters = MetadataFilters(
            filters=[
                MetadataFilter(key="knowledge_base_id", value=knowledge_base_ids, operator=FilterOperator.IN),
            ]
        )
        # Retrieval width per retriever; tune to your corpus / latency needs.
        similarity_top_k = 5

        # Fall back to placeholder retrievers when no knowledge base is given.
        retriever = self._index.as_retriever(filters=filters, similarity_top_k=similarity_top_k) if knowledge_base_ids else EmptyRetriever()
        tree_retriever = self._tree_index.as_retriever(filters=filters, similarity_top_k=similarity_top_k) if knowledge_base_ids else EmptyRetriever()
        graph_retriever = self._graph_index.as_retriever(filters=filters, similarity_top_k=similarity_top_k) if knowledge_base_ids else EmptyRetriever()
        # NOTE(review): the synonym retriever is not filtered by knowledge base
        # and runs even when knowledge_base_ids is empty — confirm intended.
        synonym_retriever = LLMSynonymRetriever(
            get_property_graph_index().property_graph_store,
            llm=get_llm(),
        )
        num_queries = 2  # the original question plus one generated variant
        hybrid_retriever = QueryFusionRetriever(
            retrievers=[graph_retriever, retriever, tree_retriever, synonym_retriever],
            similarity_top_k=similarity_top_k,
            num_queries=num_queries,
            mode=FUSION_MODES.RECIPROCAL_RANK,
            use_async=True,
            verbose=True,
        )

        # The engine manages conversational context automatically from the
        # session memory.
        return CondensePlusContextChatEngine.from_defaults(
            retriever=hybrid_retriever,
            memory=self.memory,
            llm=self._llm,
            node_postprocessors=[get_llm_reranker(), LongContextReorder()],
        )

    async def query(self, question: str, use_reranker: bool = True, knowledge_base_ids: Optional[List[str]] = None, index_type: str = "vector") -> \
        Dict[str, Any]:
        """Run one RAG query and return the answer plus retrieval metadata.

        Args:
            question: The user's question.
            use_reranker: Only echoed in the result metadata together with
                ``self.reranker``; does not toggle the LLM reranker.
            knowledge_base_ids: Knowledge bases to search; see create_engine.
            index_type: Passed through to create_engine (currently unused).

        Returns:
            Dict with ``query``, ``response``, ``sources`` (NodeWithScore
            list) and ``metadata``. Errors are caught and reported inside the
            dict rather than raised.
        """
        try:
            logger.info(f"RAG查询: {question} (session: {self.session_id}, knowledge_bases: {knowledge_base_ids}, index_type: {index_type})")
            query_engine = self.create_engine(knowledge_base_ids=knowledge_base_ids, index_type=index_type)
            response = await query_engine.achat(question)
            sources = []
            response_text = ""
            if response:
                if hasattr(response, 'response') and response.response:
                    response_text = response.response
                else:
                    # `response` is truthy here, so str() is always valid.
                    response_text = str(response)
                if hasattr(response, 'source_nodes') and response.source_nodes:
                    sources = response.source_nodes

            logger.info(f"检索到数据：{sources}")
            # TODO 可以再优化: 'Empty Response' is the llama_index sentinel for
            # "nothing retrieved"; fall back to a plain LLM answer with a
            # disclaimer appended.
            if response_text == 'Empty Response':
                logger.info("RAG查询返回空响应，尝试使用备份查询引擎")
                bak_query_engine = SimpleChatEngine.from_defaults(llm=self._llm, memory=self.memory)
                bak_response: AgentChatResponse = await bak_query_engine.achat(question)
                response_text = (f"{bak_response.response}"
                                 f"\n\n-----\n【知识库中没有相关信息。以上内容基于AI自身知识生成的回答。AI可能存在幻觉，请仔细辨别】")

            result = {
                "query": question,
                "response": response_text,
                "sources": sources,
                "metadata": {
                    "retrieved_count": len(sources),
                    "used_reranker": use_reranker and self.reranker is not None,
                    "session_id": self.session_id,
                    "knowledge_base_ids": knowledge_base_ids
                }
            }
            logger.info("RAG查询完成")
            return result

        except Exception as e:
            # Boundary handler: log with traceback and return an error payload
            # in the same shape as a successful result.
            logger.exception(f"RAG查询失败: {e}")
            return {
                "query": question,
                "response": f"查询处理失败: {str(e)}",
                "sources": [],
                "metadata": {"error": str(e), "session_id": self.session_id}
            }

    async def get_chat_history(self, limit: int, offset: int) -> List[ChatMessage]:
        """Return a page of persisted chat messages for this session.

        Args:
            limit: Maximum number of messages to return.
            offset: Number of messages to skip (for pagination).
        """
        messages = await self.memory.sql_store.get_messages(key=self.session_id, limit=limit, offset=offset)
        return messages
