"""
MultiVectorRetriever 是 LangChain 中一种特殊的检索器，它允许您基于文档的多个向量表示进行检索。这种检索器在需要从不同角度或不同粒度检索文档时特别有用。
核心概念
MultiVectorRetriever 的核心思想是：
1、一个文档可以有多个向量表示
2、这些向量可以来自文档的不同部分、不同摘要或不同嵌入方式
3、检索时可以考虑所有这些向量表示
"""
import uuid

from langchain.retrievers import MultiVectorRetriever
from langchain.storage import LocalFileStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableConfig
from langchain_text_splitters import RecursiveCharacterTextSplitter

from models import get_ollama_embeddings_client, get_ds_model_client

# Load the source document and split it into overlapping chunks.
file_path = "../data/document/deepseek百度百科.txt"
docs = TextLoader(file_path, encoding="utf-8").load()
split_docs = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=100).split_documents(docs)

# Project-local clients: a chat LLM for summarization and an embeddings model
# for the vector store (exact model/config defined in `models` — not visible here).
llm = get_ds_model_client()
embeddings_client = get_ollama_embeddings_client()

# Metadata key that links each summary vector back to its parent chunk's ID.
id_key = "doc_id"
# One stable UUID per chunk, shared by the summary (vector store) and the
# raw chunk (docstore) so a summary hit can be resolved to its original text.
docs_ids = [str(uuid.uuid4()) for _ in range(len(split_docs))]

def get_summaries_documents():
    """Summarize every split chunk with the LLM and wrap each summary as a Document.

    Each returned Document carries the matching chunk's UUID under ``id_key``
    in its metadata, so the retriever can map a summary hit back to the
    original chunk stored in the docstore.
    """
    # LCEL pipeline: extract raw text -> fill the prompt -> LLM -> plain string.
    extract_text = {"doc": lambda x: x.page_content}
    summarize_prompt = PromptTemplate.from_template("总结下面的文档:\n\n{doc}")
    summarize_chain = extract_text | summarize_prompt | llm | StrOutputParser()

    print("准备生成文档摘要，时间稍长，请耐心等待...")
    # batch() preserves input order, so summaries line up with docs_ids.
    summaries = summarize_chain.batch(split_docs, RunnableConfig(max_concurrency=5))

    summary_docs = []
    for doc_id, summary_text in zip(docs_ids, summaries):
        summary_docs.append(Document(page_content=summary_text, metadata={id_key: doc_id}))
    return summary_docs

def save_documents(summaries_docs):
    """Persist summaries into the vector store and raw chunks into the byte store.

    Both stores are keyed by the same UUIDs (``docs_ids``), which is what lets
    a matched summary be resolved back to its full original chunk.
    """
    print("准备将摘要添加到向量数据库...")
    retriever.vectorstore.add_documents(summaries_docs)
    print("准备将原始文档存储到字节存储...")
    id_chunk_pairs = [(doc_id, chunk) for doc_id, chunk in zip(docs_ids, split_docs)]
    retriever.docstore.mset(id_chunk_pairs)


vector_store = Chroma(collection_name="summaries", embedding_function=embeddings_client, persist_directory="../data/cache/summaries")
byte_store = LocalFileStore(root_path="../data/cache/document_store")
retriever = MultiVectorRetriever(vectorstore=vector_store, byte_store=byte_store, id_key=id_key)

# 执行一次持久化数据到本地
summaries_documents = get_summaries_documents()
save_documents(summaries_documents)

# Demo query: search the summary vectors, then resolve the best hit back to
# its original chunk via the shared doc_id.
query = "deepseek的企业事件"
sub_docs = retriever.vectorstore.similarity_search(query)
if not sub_docs:
    # Fix: similarity_search can return an empty list (e.g. an empty or
    # freshly created collection); indexing sub_docs[0] unguarded raised
    # IndexError in that case.
    print("未检索到匹配的摘要，请确认向量数据库已写入数据。")
else:
    print("-------------匹配的摘要内容--------------")
    print(sub_docs[0])
    print("-" * 80)
    # The summary's metadata carries the parent chunk's UUID.
    match_doc_id = sub_docs[0].metadata[id_key]
    print("match_doc_id:", match_doc_id)

    print("-------------对应的原始文档--------------")
    # mget returns a list aligned with the requested keys (None for misses).
    origin_doc = retriever.docstore.mget([match_doc_id])
    print(origin_doc)

