from src.common.logger import getLogger
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

logger = getLogger()

class NativeRAG:
    """Naive retrieve-then-generate RAG pipeline.

    Retrieves the top-k documents from a vector store and feeds their
    text into an LLM prompt to answer the user's question.
    """

    def __init__(self, llm_model, vector_store):
        """
        Args:
            llm_model: A LangChain-compatible chat model (used in an LCEL chain).
            vector_store: A LangChain vector store exposing ``as_retriever``.
        """
        self.llm_model = llm_model
        self.vector_store = vector_store

    def invoke(self, query: str) -> dict:
        """Answer *query* using retrieved context.

        Args:
            query: The user's question.

        Returns:
            dict with keys:
                - "retrieve_docs": list[str] of retrieved page contents.
                - "chain_result": str, the LLM's answer.
        """
        # Lazy %-style args avoid formatting cost when the level is disabled.
        logger.info("NativeRAG invoke query: %s", query)
        retriever = self.vector_store.as_retriever(search_kwargs={"k": 3})
        retrieve_docs = retriever.invoke(query)
        docs_result = [doc.page_content for doc in retrieve_docs]
        logger.info("NativeRAG invoke docs_result len: %d", len(docs_result))

        template = """
            请基于以下上下文内容回答问题：
            {context}
            
            问题：{question}
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        # BUG FIX: pass the joined page contents, not the raw Document list —
        # otherwise the template interpolates the Python repr of Document
        # objects (metadata included) instead of the retrieved text.
        context = "\n\n".join(docs_result)
        chain_result = chain.invoke({"context": context, "question": query})
        logger.info("NativeRAG invoke chain_result len: %d", len(chain_result))
        return {"retrieve_docs": docs_result, "chain_result": chain_result}
