from src.common.logger import getLogger
from langchain_core.runnables import RunnableLambda
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate

# Module-level logger from the project's shared logging helper.
logger = getLogger()

class StepBack:
    """Step-back prompting RAG chain.

    First rewrites the user's question into a more generic "step-back"
    question via a few-shot prompt, then retrieves context for both the
    original and the step-back question, and finally answers using both
    contexts.
    """

    def __init__(self, llm_model, vector_store):
        # llm_model: LangChain-compatible chat model, used via LCEL pipes.
        # vector_store: LangChain vector store, exposed as a retriever in invoke().
        self.llm_model = llm_model
        self.vector_store = vector_store

    def invoke(self, query):
        """Run the step-back RAG pipeline for `query`.

        Returns a dict with:
          - "retrieve_docs": the generated step-back question (key name kept
            for backward compatibility with existing callers)
          - "chain_result": the final answer string
        """
        logger.info(f"StepBack invoke query: {query}")
        # Top-3 similarity retriever over the configured vector store.
        retriever = self.vector_store.as_retriever(search_kwargs={"k": 3})

        # Few-shot examples demonstrating the question -> step-back rewrite.
        examples = [
            {
                "input": "Could the members of The Police perform lawful arrests?",
                "output": "what can the members of The Police do?",
            },
            {
                "input": "Jan Sindel's was born in what country?",
                "output": "what is Jan sindel's personal history?",
            },
        ]
        example_prompt = ChatPromptTemplate.from_messages([("human", "{input}"), ("ai", "{output}")])
        # NOTE: fixed local-name typo (was `few_show_prompt`).
        few_shot_prompt = FewShotChatMessagePromptTemplate(example_prompt=example_prompt, examples=examples)

        step_back_template = """
            你是世界知识专家。
            您的任务是退后一步，将问题改写为更通用的退后式提问，使问题更容易回答。
            这里有几个例子：
        """
        step_back_prompt = ChatPromptTemplate.from_messages([
            ("system", step_back_template), few_shot_prompt, ("user", "{question}")
        ])
        step_back_chain = step_back_prompt | self.llm_model | StrOutputParser()
        step_back_result = step_back_chain.invoke({"question": query})
        logger.info(f"StepBack invoke step_back_result: {step_back_result}")

        template = """
            您是世界知识的专家。我将向您提问。
            您的回答应全面，且不得与相关上下文内容相矛盾。
            否则，如果它们不相关，则忽略它们。
            原始问题的上下文：{normal_context}
            降级问题的上下文：{step_back_context}
            问题：{question}
        """
        prompt = ChatPromptTemplate.from_template(template)
        # Reuse the step-back question computed above instead of piping
        # `step_back_chain` into the retriever again, which would trigger a
        # second (and possibly different) LLM call for the same rewrite —
        # the answer would then be based on a question other than the one
        # logged and returned.
        chain = {
            "normal_context": RunnableLambda(lambda x: x["question"]) | retriever,
            "step_back_context": RunnableLambda(lambda _: step_back_result) | retriever,
            "question": lambda x: x["question"],
        } | prompt | self.llm_model | StrOutputParser()
        # The prompt only consumes "question"; the previously-passed extra
        # "context" key was never referenced by any template variable.
        chain_result = chain.invoke({"question": query})
        logger.info(f"StepBack invoke chain_result len: {len(chain_result)}")
        return {"retrieve_docs": step_back_result, "chain_result": chain_result}
