import re
import uuid

from dotenv import load_dotenv
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import ChatOpenAI

# Load environment variables from a local .env file — presumably the API key /
# base URL consumed by ChatOpenAI below; verify against the deployment config.
load_dotenv()

# ====== 1. Model loading ======
# Local Chinese BGE embedding model, loaded from disk and run on CPU.
# NOTE(review): the path looks like it may be missing a separator
# ("BAAI/bge-base-zh-v1.5") — confirm the directory actually exists.
embeddings = HuggingFaceEmbeddings(
    model_name="D:/models/BAAIbge-base-zh-v1.5",
    model_kwargs={'device': 'cpu'}
)
# Chat model used below to generate hypothetical user questions per document.
llm = ChatOpenAI(model="xdeepseekv32exp", temperature=0.7)

# ====== 2. Source documents ======
# Each parent document gets a unique doc_id so that index entries created later
# can be mapped back to it.
_texts = [
    "RAG_Fusion通过生成多个查询变体并使用RRF算法智能排序来提升检索相关性",
    "假设性文档嵌入（HyDE）先生成一个理想答案，再用答案的嵌入来检索真实文档。",
]
doc_ids = [str(uuid.uuid4()) for _ in _texts]
docs = [
    Document(page_content=text, metadata={"doc_id": did})
    for text, did in zip(_texts, doc_ids)
]

# ====== 3. Question-generation chain ======
# Ask the LLM for 3 highly relevant questions a user might pose about a
# document; the raw string output is split into one question per line downstream.
_QUESTION_PROMPT_TEMPLATE = (
    "你是一位AI专家。请根据以下文档内容，生成3个用户可能会提出的，高度相关的问题。\n"
    "只返回问题列表，每个问题一行，不要有其他前缀或编号。\n\n"
    "文档内容为：\n"
    "-----------------\n"
    "{content}\n"
    "-----------------\n"
)
question_gen_prompt = ChatPromptTemplate.from_template(_QUESTION_PROMPT_TEMPLATE)

question_generator_chain = question_gen_prompt | llm | StrOutputParser()

# ====== 4. Generate child documents (questions become the indexed vectors) ======
# LLMs frequently ignore the "no prefixes or numbering" instruction in the
# prompt, so defensively strip common list markers ("1.", "1)", "-", "•", …)
# from each line before indexing; blank lines are dropped as before.
_LIST_PREFIX = re.compile(r"^\s*(?:[-*•]|\d+[.、)）])\s*")

sub_docs = []
for doc_id, doc in zip(doc_ids, docs):
    questions_str = question_generator_chain.invoke({"content": doc.page_content})
    for line in questions_str.split("\n"):
        question = _LIST_PREFIX.sub("", line).strip()
        if question:
            # Child doc carries the parent's doc_id so retrieval can map back.
            sub_docs.append(Document(page_content=question, metadata={"doc_id": doc_id}))

# ====== 5. Vector store over the generated questions ======
# Only the child questions are embedded; parent documents live in doc_store.
vectorstore_qa = Chroma.from_documents(sub_docs, embedding=embeddings)

# ====== 6. Store the original parent documents, keyed by doc_id ======
doc_store = InMemoryStore()
doc_store.mset(list(zip(doc_ids, docs)))

# ====== 7. Build the multi-vector retriever ======
# Similarity search runs over the question vectors in vectorstore_qa; each hit's
# "doc_id" metadata (id_key) is used to fetch the parent document from doc_store.
multivertor_retriever = MultiVectorRetriever(
    vectorstore=vectorstore_qa,
    docstore=doc_store,
    id_key="doc_id"
)

# ====== 8. Query smoke test ======
# The retriever returns parent documents, even though matching happened on the
# generated child questions.
user_query = "RAG_Fusion是怎么工作的？"
retrieved_docs = multivertor_retriever.invoke(user_query)

print("检索结果：")
for hit in retrieved_docs:
    print(f"- {hit.page_content} (doc_id={hit.metadata['doc_id']})")