"""整合RAG流程，生成最终回答"""
import datetime

from langchain.docstore.document import Document


from .prompts import RAGPrompts
from .query_classifier import QAClassifier
from .strategy_selector import StrategySelector
from .vector_store import VectorStore
from src.spark_edu_rag.utils import get_logger, config_ini

# RAGSystem: integrates the full RAG pipeline (query classification, retrieval, generation) to produce the final answer.
class RAGSystem:
    def __init__(self, vector_store: VectorStore, llm):
        self.logger = get_logger(__name__)
        self.rag_prompts = RAGPrompts.rag_prompt()
        self.query_classifier = QAClassifier()
        self.strategy_selector = StrategySelector()
        self.vector_store = vector_store
        self.llm = llm

    def generate_answer(self, query, source_filter: str=None):
        """处理用户查询，生成最终答案（分类、检索、生成全流程）"""
        #1、记录查询
        start_time = datetime.datetime.now()
        self.logger.debug(f"开始时间：{start_time}, 查询：{query}, 来源筛选：{source_filter}")
        # 通用查询分类器
        query_type = self.query_classifier.predict_category(query)
        self.logger.debug(f"查询分类结果：{query_type}")

        # 选择检索策略，根据查询类型和来源筛选
        strategy = self.strategy_selector.select_strategy(query_type)
        self.logger.debug(f"选择的检索策略：{strategy}")
        # 执行检索策略
        context = self.retrieval_execute(query, source_filter, strategy)
        self.logger.debug(f"检索到的上下文：{context}")

        if context :
            context = "\n\n".join([ chunk.page_content for chunk in context])
            self.logger.info(f"最终的上下文数据：{len(context)}")
        else:
            context = ""
            self.logger.error("检索到的上下文为空")
        final_prompt = self.rag_prompts.format(
            context = context,
            question = query
        )
        try:
            answer = self.llm(final_prompt)
            self.logger.debug(f"最终回答：{answer}")
        except Exception as e:
            self.logger.error(f"LLM调用出错：{e}")
            answer = "抱歉，我无法回答这个问题，请联系人工客服"
        self.logger.info(f"最终回答记录：{query}, 分类结果：{query_type}, 回答：{answer}, 耗时：{datetime.datetime.now() - start_time}")


        return answer

    def retrieval_execute(self, query, source_filter, strategy):
        """
        执行检索策略，根据指定策略或者自动选择策略，去检索文档，返回检索到的上下文
        :query (str): 用户查询
        :source_filter (str, optional): 来源筛选条件，默认None
        :strategy (str, optional): 检索策略，默认None
        return str: 检索到的上下文
        """
        if strategy is None:
            strategy = self.strategy_selector.select_strategy(query)

        ranked_chunks = []
        if strategy == "回溯问题检索":
            ranked_chunks = self._retrieval_with_backtrack(query, source_filter)
        elif strategy == "子查询检索":
            ranked_chunks = self._retrieval_with_subquery(query, source_filter)
        elif strategy == "假设问题检索":
            ranked_chunks = self._retrieval_with_hypothesis(query, source_filter)
        else:
            self.logger.error(f"直接检索策略查询: {query}")
            ranked_chunks = self.vector_store.hybrid_search_with_rerank(query, source_filter, top_k=5)

        # 合并检索到的上下文
        self.logger.debug(f"合并检索到的上下文：{ranked_chunks},检索策略：{strategy}")
        final_context = ranked_chunks[:5]
        return final_context


    def _retrieval_with_backtrack(self, query: str, source_filter: str=None) -> list[Document]:
        """
        回溯问题检索策略，将复杂的用户查询转化为更基础、更易于检索的问题，然后进行检索。
        :query (str): 用户原始查询，通常是逻辑复杂或者含场景的问题
        :source_filter (str, optional): 来源筛选条件，默认None，如： "ai",表示只检索ai相关的文档
        return list: 检索到的上下文列表
        """
        self.logger.info(f"回溯问题检索：{query}, 来源筛选：{source_filter}")
        prompt_query = RAGPrompts.backtracking_prompt().format(
            query = query
        )
        try:
            backtrack_query = self.llm(prompt_query)
            self.logger.debug(f"回溯问题检索结果：{backtrack_query}")
            return self.vector_store.hybrid_search_with_rerank(backtrack_query, source_filter, top_k=5)
        except Exception as e:
            self.logger.error(f"回溯问题检索出错：{e}")
            return []



    def _retrieval_with_subquery(self, query, source_filter):
        """
        子查询检索策略，将复杂的用户查询分解为多个简单子查询，每个子查询单独检索，最后合并结果。
        :query (str): 用户原始查询，通常是逻辑复杂或者含场景的问题
        :source_filter (str, optional): 来源筛选条件，默认None
        return list[Document]: 检索到的上下文列表
        """
        self.logger.info(f"子查询检索：{query}, 来源筛选：{source_filter}")
        prompt_query = RAGPrompts.subquery_prompt().format(query = query )
        try:
            subqueries = self.llm(prompt_query).strip()
            self.logger.debug(f"子查询检索结果：{subqueries}")
            subqueries_text = [sub.strip() for sub in subqueries.split("\n") if sub.strip()]
            # 检查子查询的结果是否为空
            if not subqueries_text:
                self.logger.error(f"子查询检索结果为空：{subqueries_text}")
                return []
            # 合并所有子查询的检索结果
            all_chunks = []
            for subquery in subqueries_text:
                #
                chunks = self.vector_store.hybrid_search_with_rerank(subquery, source_filter, top_k=5//2)
                all_chunks.extend(chunks)
            # 所有子查询的检索结果去重后合并后，根据rerank_score排序
            unique_chunks = {chunk.page_content: chunk for chunk in all_chunks}
            chunk_list = list(unique_chunks.values())

            return chunk_list
        except Exception as e:
            self.logger.error(f"子查询检索出错：{e}")
            return []

    def _retrieval_with_hypothesis(self, query, source_filter):
        """
        假设问题检索策略，根据用户查询，假设生成一个答案，然后根据假设答案进行检索。
        :query (str): 用户原始查询，通常是逻辑复杂或者含场景的问题
        :source_filter (str, optional): 来源筛选条件，默认None
        return list[Document]: 检索到的上下文列表
        """
        self.logger.info(f"假设问题检索：{query}, 来源筛选：{source_filter}")
        prompt_query = RAGPrompts.hyde_prompt().format(query = query )
        try:
            hypothesis = self.llm(prompt_query)
            self.logger.debug(f"假设问题检索结果：{hypothesis}")
            return self.vector_store.hybrid_search_with_rerank(hypothesis, source_filter, top_k=5)
        except Exception as e:
            self.logger.error(f"假设问题检索出错：{e}")
            return []


if __name__ == '__main__':
    # Smoke test: wire the vector store's DashScope caller in as the LLM and
    # answer a sample query. (The original script built these two objects but
    # never exercised them — dead code.)
    vector_store = VectorStore()
    llm = vector_store.call_dashscope
    rag_system = RAGSystem(vector_store, llm)
    print(rag_system.generate_answer("什么是RAG？"))
