import uuid
from src.common.logger import getLogger
from langchain_core.documents import Document
from langchain_core.stores import InMemoryByteStore
from langchain.retrievers import MultiVectorRetriever
from langchain_core.prompts import ChatPromptTemplate
from src.agentic.config.VectorStore import VectorStore
from langchain_core.output_parsers import StrOutputParser

# Module-level logger from the project's logging helper; shared by this module.
logger = getLogger()

class MultiRepresentation:
    """Multi-representation RAG pipeline.

    Summarizes documents with an LLM, indexes the summaries in a vector
    store, keeps the full documents in an in-memory byte store, and answers
    a query from the full documents retrieved via their summaries
    (LangChain ``MultiVectorRetriever`` pattern).
    """

    def __init__(self, llm_model, embed_model, collection_prefix, library_number, batch_size: int = 5):
        """
        Args:
            llm_model: chat model used for both summarization and answering.
            embed_model: embedding model backing the vector stores.
            collection_prefix: prefix of the vector-store collection names.
            library_number: suffix identifying the document library.
            batch_size: maximum number of documents to summarize and index
                per ``invoke`` call (cost control).
        """
        self.llm_model = llm_model
        self.embed_model = embed_model
        self.collection_prefix = collection_prefix
        self.library_number = library_number
        self.batch_size = batch_size

    def invoke(self, query):
        """Answer ``query`` using summary-indexed multi-vector retrieval.

        Returns:
            dict with keys ``"retrieve_docs"`` (joined text of the retrieved
            full documents) and ``"chain_result"`` (the LLM answer).
        """
        logger.info(f"MultiRepresentation invoke query: {query}")

        doc_collection_name = self.collection_prefix + self.library_number
        doc_vector_store = VectorStore().new_vector_store(self.embed_model, doc_collection_name)
        # NOTE(review): a single scroll page with a huge limit — assumes the
        # whole collection fits in one page; paginate if it can grow beyond this.
        doc_records, _ = doc_vector_store.client.scroll(collection_name = doc_collection_name, limit = 999999, offset = None)
        logger.info(f"MultiRepresentation invoke doc_records len: {len(doc_records)}")
        doc_records = [Document(page_content = doc.payload.get("page_content", "")) for doc in doc_records]

        summary_template = """
            总结输入的文档：{document}
        """
        summary_prompt = ChatPromptTemplate.from_template(summary_template)
        summary_chain = { "document": lambda x: x.page_content } | summary_prompt | self.llm_model | StrOutputParser()
        # Only the first `batch_size` documents are summarized and indexed.
        batch_docs = doc_records[:self.batch_size]
        summary_results = summary_chain.batch(batch_docs)
        logger.info(f"MultiRepresentation invoke summary_results len: {len(summary_results)}")

        id_key = "doc_id"
        memory_byte_store = InMemoryByteStore()
        # FIX: allocate ids only for the summarized subset so the docstore and
        # the summary vector store stay 1:1. The original allocated ids for ALL
        # documents and mset() every document, leaving unreachable docstore
        # entries (summaries exist only for the first batch).
        doc_ids = [str(uuid.uuid4()) for _ in batch_docs]
        summary_docs = [Document(page_content = s, metadata = { id_key: doc_ids[i] }) for i, s in enumerate(summary_results)]

        summary_collection_name = self.collection_prefix + "summary_" + self.library_number
        logger.info(f"MultiRepresentation invoke summary_collection_name: {summary_collection_name}")
        summary_vector_store = VectorStore().new_vector_store(self.embed_model, summary_collection_name)

        # FIX: `n_results=3` passed to get_relevant_documents() was silently
        # ignored; the retriever's result count is configured via search_kwargs.
        multi_retriever = MultiVectorRetriever(vectorstore = summary_vector_store, byte_store = memory_byte_store, id_key = id_key, search_kwargs = { "k": 3 })
        multi_retriever.vectorstore.add_documents(summary_docs)
        multi_retriever.docstore.mset(list(zip(doc_ids, batch_docs)))

        # Diagnostic only: log the nearest summary to the query.
        sub_docs = summary_vector_store.similarity_search(query, k = 1)
        logger.info(f"MultiRepresentation invoke sub_docs: {sub_docs}")

        # FIX: get_relevant_documents() is deprecated; use the Runnable API.
        retrieve_docs = multi_retriever.invoke(query)
        retrieve_doc = "\n".join([doc.page_content for doc in retrieve_docs])
        logger.info(f"MultiRepresentation invoke retrieve_doc len: {len(retrieve_doc)}")

        template = """
            请基于以下上下文内容回答问题：
            {context}
            
            问题：{question}
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        chain_result = chain.invoke({ "context": retrieve_doc, "question": query })
        logger.info(f"MultiRepresentation invoke chain_result len: {len(chain_result)}")
        return { "retrieve_docs": retrieve_doc, "chain_result": chain_result }
