"""
完整示例：RAG + 后退一步问题生成 + 向量检索 + 去重
功能：
1️⃣ 将用户原始问题生成一个"后退一步"抽象问题
2️⃣ 对原始问题和抽象问题分别检索向量数据库
3️⃣ 合并检索结果并去重
4️⃣ 打印抽象问题和最终检索文档
"""
from dotenv import load_dotenv
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from langchain_text_splitters import CharacterTextSplitter

load_dotenv()

# ---------------------------
# 1️⃣ Build the document corpus
# ---------------------------
# Three short Chinese reference snippets; metadata tags source and category.
docs = [
    Document(page_content="LangChain 是一个让大模型可以使用外部数据和工具的框架。", metadata={"source": "langchain_intro", "category": "technical"}),
    Document(page_content="Retriever 是 LangChain 中用于从数据库中检索相关信息的组件。", metadata={"source": "retriever_guide", "category": "technical"}),
    Document(page_content="Milvus 是一个高性能的向量数据库，用于存储和检索向量化的数据。", metadata={"source": "milvus_docs", "category": "database"})
]

# Split the documents into small overlapping chunks (sizes are in characters).
splitter = CharacterTextSplitter(chunk_size=30, chunk_overlap=10)
split_docs = splitter.split_documents(docs)

# ---------------------------
# 2️⃣ Build the vector store
# ---------------------------
# Local CPU embedding model.
# NOTE(review): the path "BAAIbge-base-zh-v1.5" looks like it is missing a
# separator (expected something like "BAAI/bge-base-zh-v1.5") — confirm the
# directory actually exists on disk.
embeddings = HuggingFaceEmbeddings(model_name="D:/models/BAAIbge-base-zh-v1.5", model_kwargs={'device': 'cpu'})
# In-memory Chroma index built from the split chunks (not persisted).
vectorstore = Chroma.from_documents(split_docs, embeddings)

# Plain similarity search returning the top-5 chunks per query.
custom_retriever = vectorstore.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 5}
)

# ---------------------------
# 3️⃣ Initialize the LLM
# ---------------------------
# Alternative: an OpenAI-compatible endpoint (kept for reference).
#llm = ChatOpenAI(model="xdeepseekv32exp", temperature=0.7)
# Local Ollama model; temperature=0 for deterministic question rewriting.
llm = ChatOllama(temperature=0, model="deepseek-r1:1.5b", base_url="http://127.0.0.1:11434")

# ---------------------------
# 4️⃣ Build the "step back" question-generation chain
# ---------------------------
# Single-turn prompt (in Chinese) asking the model to rewrite a concrete
# question into a more general, abstract one, with one worked example.
step_back_prompt = ChatPromptTemplate.from_messages([
    (
        "user",
        "请将以下具体问题抽象为一个更通用的'后退一步'问题。\n"
        "例如：'LCEL 和 asyncio 如何交互？' -> 'LCEL 的异步机制是怎样的？'\n\n"
        "原始问题：{original_question}"
    )
])

# prompt -> LLM -> plain string containing the abstract question
step_back_chain = step_back_prompt | llm | StrOutputParser()

# ---------------------------
# 5️⃣ 去重函数
# ---------------------------
def remove_duplicates(docs):
    """Deduplicate documents by text content, preserving first-seen order.

    Each item's identity is its ``page_content`` when present, otherwise its
    ``str()`` form; only the first occurrence of each text is kept.
    """
    seen_texts = set()
    unique_docs = []
    for doc in docs:
        text = doc.page_content if hasattr(doc, "page_content") else str(doc)
        if text in seen_texts:
            continue
        seen_texts.add(text)
        unique_docs.append(doc)
    return unique_docs

# ---------------------------
# 6️⃣ 修正的完整流程 - 顺序执行
# ---------------------------
def full_rag_chain(original_question: str):
    """Run the complete step-back RAG flow for one question, sequentially.

    Generates an abstract "step back" question, retrieves documents for both
    the original and the abstract question, then merges and deduplicates the
    hits. Progress is printed along the way.

    Returns a dict with keys ``original_question``, ``abstract_question``
    and ``docs`` (the deduplicated document list).
    """
    # Step 1: abstract the concrete question via the step-back chain.
    abstract_question = step_back_chain.invoke({"original_question": original_question})
    print(f"原始问题: {original_question}")
    print(f"抽象问题: {abstract_question}")

    # Step 2: retrieve separately for each question.
    docs_for_original = custom_retriever.invoke(original_question)
    docs_for_abstract = custom_retriever.invoke(abstract_question)
    print(f"\n原始问题检索到 {len(docs_for_original)} 个文档")
    print(f"抽象问题检索到 {len(docs_for_abstract)} 个文档")

    # Step 3: merge the two hit lists and drop duplicate contents.
    merged_docs = remove_duplicates(docs_for_original + docs_for_abstract)
    print(f"去重后共有 {len(merged_docs)} 个文档")

    return {
        "original_question": original_question,
        "abstract_question": abstract_question,
        "docs": merged_docs
    }

# ---------------------------
# 7️⃣ 使用 Runnable 的版本（如果需要链式结构）
# ---------------------------
def create_sequential_chain():
    """Return the sequential step-back RAG flow wrapped as a RunnableLambda.

    The resulting runnable expects ``{"original_question": str}`` and yields
    the same dict shape as ``full_rag_chain`` (original question, abstract
    question, deduplicated docs), but without the progress printing.
    """

    def _run(payload):
        # Abstract the question, retrieve for both variants, then dedupe.
        question = payload["original_question"]
        abstract = step_back_chain.invoke({"original_question": question})
        hits = custom_retriever.invoke(question) + custom_retriever.invoke(abstract)
        return {
            "original_question": question,
            "abstract_question": abstract,
            "docs": remove_duplicates(hits)
        }

    return RunnableLambda(_run)

# ---------------------------
# 8️⃣ Demo: run both variants at import time
# ---------------------------
original_question = "LangChain 如何结合 Retriever 与大模型使用？"

# Method 1: plain function call with verbose progress output.
print("方法1: 直接函数调用")
print("=" * 50)
result = full_rag_chain(original_question)

print("\n最终结果:")
print("-" * 30)
print(f"原始问题: {result['original_question']}")
print(f"抽象问题: {result['abstract_question']}")
print(f"检索到 {len(result['docs'])} 个文档:")
# Print each retrieved document, with metadata when available.
for i, doc in enumerate(result['docs'], 1):
    content = doc.page_content
    print(f"{i}. {content}")
    if hasattr(doc, 'metadata') and doc.metadata:
        print(f"   元数据: {doc.metadata}")

# Method 2: same flow as a composable Runnable chain (no progress prints).
print("\n" + "=" * 50)
print("方法2: 使用 Runnable 链")
print("=" * 50)

sequential_chain = create_sequential_chain()
result2 = sequential_chain.invoke({"original_question": original_question})

print(f"原始问题: {result2['original_question']}")
print(f"抽象问题: {result2['abstract_question']}")
print(f"检索到 {len(result2['docs'])} 个文档:")
for i, doc in enumerate(result2['docs'], 1):
    content = doc.page_content
    print(f"{i}. {content}")