from src.common.logger import getLogger
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Module-wide logger obtained from the project's logging helper
# (no name argument — presumably returns a shared/root logger; confirm in src.common.logger).
logger = getLogger()

class SubQuestion:
    """RAG "sub-question" strategy.

    The input query is decomposed by the LLM into three independently
    answerable sub-questions; each sub-question is answered against the top-3
    documents retrieved from the vector store; finally the LLM synthesizes an
    overall answer from the collected question/answer pairs.
    """

    def __init__(self, llm_model, vector_model=None, vector_store=None):
        # NOTE: signature kept as (llm_model, vector_store) for callers.
        self.llm_model = llm_model
        self.vector_store = vector_store

    def __init__(self, llm_model, vector_store):
        # llm_model: a LangChain-compatible chat model (used in LCEL pipes below).
        # vector_store: a LangChain vector store exposing `as_retriever`.
        self.llm_model = llm_model
        self.vector_store = vector_store

    def invoke(self, query):
        """Run the sub-question pipeline for ``query``.

        Returns a dict with:
          - "retrieve_docs": the formatted "Question i / Answer i" context string
          - "chain_result": the synthesized final answer text
        """
        logger.info(f"SubQuestion invoke query: {query}")
        retriever = self.vector_store.as_retriever(search_kwargs={"k": 3})

        sub_question_template = """
            你是一个智能助手AI，你的任务是用给定的问题生成3个子问题，使生成的3个子问题可以独立的回答。生成的3个问题用换行符隔开。
            给定的问题： {question}
        """
        sub_question_prompt = ChatPromptTemplate.from_template(sub_question_template)
        # Strip whitespace and drop blank lines: the model often pads its list
        # with empty lines, and a bare split("\n") would send "" to the
        # retriever and the answer chain below.
        sub_question_chain = (
            sub_question_prompt
            | self.llm_model
            | StrOutputParser()
            | (lambda x: [q.strip() for q in x.split("\n") if q.strip()])
        )
        sub_questions = sub_question_chain.invoke({"question": query})
        logger.info(f"SubQuestion invoke sub_questions: {sub_questions}")

        sub_question_answer_template = """
            请根据提供的上下文回答问题。
            上下文： {context}
            问题： {question}
        """
        sub_question_answer_prompt = ChatPromptTemplate.from_template(sub_question_answer_template)
        # The chain is loop-invariant: build it once, not once per sub-question.
        sub_question_answer_chain = sub_question_answer_prompt | self.llm_model | StrOutputParser()
        sub_question_answers = []
        for sub_question in sub_questions:
            sub_question_doc = retriever.invoke(sub_question)
            sub_question_answer = sub_question_answer_chain.invoke(
                {"context": sub_question_doc, "question": sub_question}
            )
            sub_question_answers.append(sub_question_answer)

        # Build the Q/A context with join instead of quadratic `+=`.
        # Bug fix: the original used `\ ` (an invalid escape that rendered as a
        # literal backslash) where a newline between question and answer was
        # clearly intended.
        format_content = "".join(
            f"Question {i}: {sub_question}\nAnswer {i}: {sub_question_answer} \n\n"
            for i, (sub_question, sub_question_answer) in enumerate(
                zip(sub_questions, sub_question_answers), start=1
            )
        )

        template = """
            提供了下上下文问题 + 答案集合：
            {context}
            用下上下文问题 + 答案集合综合分析问题的答案。
            问题：{question}
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        chain_result = chain.invoke({"context": format_content, "question": query})
        logger.info(f"SubQuestion invoke chain_result len: {len(chain_result)}")
        return {"retrieve_docs": format_content, "chain_result": chain_result}
