from src.common.logger import getLogger
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

logger = getLogger()

class HYDERAG:
    """HYDE (Hypothetical Document Embeddings) RAG pipeline.

    First asks the LLM to write a hypothetical document answering the
    user's question, retrieves real documents from the vector store that
    are similar to that hypothetical text, then answers the original
    question grounded in the retrieved context.
    """

    def __init__(self, llm_model, vector_store):
        # llm_model: LangChain-compatible chat model (composed via LCEL `|`).
        # vector_store: any store exposing `.as_retriever()`.
        self.llm_model = llm_model
        self.vector_store = vector_store

    def invoke(self, query):
        """Run the full HYDE pipeline for `query`.

        Args:
            query: the user's question (plain string).

        Returns:
            dict with keys:
              - "retrieve_docs": retrieved documents' page contents joined
                with newlines.
              - "chain_result": the model's final answer string.
        """
        # Lazy %-style args so the message is only formatted if emitted.
        logger.info("HYDERAG invoke query: %s", query)
        retriever = self.vector_store.as_retriever(search_kwargs={"k": 3})

        # Step 1: have the LLM write a hypothetical document for the question.
        hyde_question_template = """
            你是一个智能助手AI，你的任务是用给定的问题生成一篇科学的文章。
            给定的问题： {question}
        """
        # Distinct name for the prompt object — the original rebound the
        # template-string variable, which was confusing to read.
        hyde_question_prompt = ChatPromptTemplate.from_template(hyde_question_template)
        hyde_question_chain = hyde_question_prompt | self.llm_model | StrOutputParser()

        # Step 2: retrieve docs similar to the hypothetical text (not the raw query).
        hyde_retrieve_chain = hyde_question_chain | retriever
        hyde_retrieve_docs = hyde_retrieve_chain.invoke({"question": query})
        hyde_retrieve_doc = "\n".join(doc.page_content for doc in hyde_retrieve_docs)
        logger.info("HYDERAG invoke hyde_retrieve_doc len: %d", len(hyde_retrieve_doc))

        # Step 3: answer the original question using the retrieved context.
        template = """
            根据提供的上下文问题回答问题：
            上下文：{context}
            问题：{question}
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        chain_result = chain.invoke({"context": hyde_retrieve_doc, "question": query})
        logger.info("HYDERAG invoke chain_result len: %d", len(chain_result))
        return {"retrieve_docs": hyde_retrieve_doc, "chain_result": chain_result}
