"""
修复历史感知检索器问题 - 完整版本
"""

from dotenv import load_dotenv
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain.chains.retrieval import create_retrieval_chain
from langchain_chroma import Chroma
from langchain_community.document_loaders import WebBaseLoader
from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
from langchain_core.prompts import MessagesPlaceholder, ChatPromptTemplate
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.messages import HumanMessage, AIMessage
import os

# Load environment variables from a local .env file (MODELSCOPE_API_KEY /
# MODELSCOPE_API_BASE are read later when the LLM client is created).
load_dotenv()

def main():
    """Build and smoke-test a conversational RAG pipeline over two web pages.

    Pipeline: load web docs -> split -> embed (local HuggingFace model) ->
    Chroma vector store -> history-aware retriever -> stuff-documents QA chain,
    all wrapped in ``RunnableWithMessageHistory`` for per-session chat memory.

    FIX: the previous version imported ``create_history_aware_retriever`` but
    wired the plain retriever into ``create_retrieval_chain``, so a follow-up
    question such as "她演过什么电视剧？" was used verbatim as the retrieval
    query — the pronoun matches nothing in the corpus. The history-aware
    retriever first reformulates the question into a standalone one using the
    chat history, then retrieves.

    Side effects: network access, local model load, console output.
    Returns: None. Exceptions are caught at this top-level boundary, printed
    with a traceback, and swallowed (demo-script behavior).
    """
    print("🚀 启动修复版 RAG 系统...")

    try:
        # 1. Load source documents from the web (two encyclopedia pages).
        print("📄 加载文档...")
        loader = WebBaseLoader(
            web_paths=[
                "https://baike.baidu.com/item/%E5%BE%90%E7%92%90/51957",
                "https://baike.sogou.com/v6380555.htm?ch=frombaikevr&fromTitle=%E5%BE%90%E7%92%90",
            ]
        )
        docs = loader.load()
        print(f"   加载了 {len(docs)} 个文档")

        # 2. Split documents into overlapping chunks for embedding/retrieval.
        print("✂️ 文本分割...")
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=100,
        )
        splits = text_splitter.split_documents(docs)
        print(f"   分割为 {len(splits)} 个片段")

        # 3. Embedding model — local path on this machine; TODO confirm the
        # model directory exists before deploying elsewhere.
        print("🔤 创建嵌入模型...")
        embeddings = HuggingFaceEmbeddings(
            model_name="D:/models/BAAIbge-base-zh-v1.5",
            model_kwargs={"device": "cpu"},
        )

        # 4. In-memory Chroma vector store; retrieve top-3 chunks per query.
        print("💾 创建向量存储...")
        vector_store = Chroma.from_documents(splits, embeddings)
        retriever = vector_store.as_retriever(search_kwargs={"k": 3})

        # 5. Chat model (OpenAI-compatible ModelScope endpoint).
        print("🧠 创建 LLM...")
        llm = ChatOpenAI(
            api_key=os.getenv("MODELSCOPE_API_KEY"),
            openai_api_base=os.getenv("MODELSCOPE_API_BASE"),
            model="Qwen/Qwen2.5-VL-7B-Instruct",
            streaming=True,
            temperature=0,
        )

        # 6. Prompts.
        print("📝 创建提示词...")

        # 6a. Question-contextualization prompt: given the chat history and the
        # latest user message, produce a standalone question (no answering).
        # This is what makes follow-up pronouns retrievable.
        contextualize_q_system_prompt = (
            "给定聊天历史和最新的用户问题，"
            "该问题可能引用了聊天历史中的内容，"
            "请将其改写为一个无需聊天历史也能理解的独立问题。"
            "不要回答问题，只需改写；若无需改写则原样返回。"
        )
        contextualize_q_prompt = ChatPromptTemplate.from_messages([
            ("system", contextualize_q_system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ])

        # 6b. Answering prompt: system instructions + retrieved {context},
        # chat history, then the user question.
        system_prompt = """你是一个用于问答任务的助手。
使用以下检索到的上下文片段来回答问题。
如果你不知道答案，请说你不知道。
最多使用三句话，保持回答简洁。

上下文：
{context}"""

        qa_prompt = ChatPromptTemplate.from_messages([
            ("system", system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ])

        # 7. History-aware retriever: reformulate-with-history, then retrieve.
        # (Previously imported but unused — this was the bug.)
        history_aware_retriever = create_history_aware_retriever(
            llm, retriever, contextualize_q_prompt
        )

        # 8. Chain that stuffs retrieved docs into the QA prompt and calls the LLM.
        question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)

        # 9. Full RAG chain: history-aware retrieval feeds the QA chain.
        rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)

        # 10. Per-session chat histories, keyed by session_id, kept in memory.
        store = {}

        def get_session_history(session_id: str) -> BaseChatMessageHistory:
            # Lazily create one in-memory history per session id.
            if session_id not in store:
                store[session_id] = InMemoryChatMessageHistory()
                print(f"   🆕 创建新会话: {session_id}")
            return store[session_id]

        # 11. Wrap the chain so history is injected/recorded automatically.
        conversational_rag_chain = RunnableWithMessageHistory(
            rag_chain,
            get_session_history,
            input_messages_key="input",
            history_messages_key="chat_history",
            output_messages_key="answer",  # record the answer field into history
        )

        print("✅ 所有组件创建成功!")

        # 12. Smoke test: first question, then a pronoun-bearing follow-up that
        # exercises the history-aware reformulation.
        print("\n🧪 开始测试...")
        response = conversational_rag_chain.invoke(
            {"input": "徐璐是谁"},
            config={"configurable": {"session_id": "abc123"}},
        )

        print("🎯 回答:", response["answer"])

        print("\n--- 连续对话测试 ---")
        response2 = conversational_rag_chain.invoke(
            {"input": "她演过什么电视剧？"},
            config={"configurable": {"session_id": "abc123"}},
        )
        print("🎯 回答:", response2["answer"])

    except Exception as e:
        # Top-level boundary for a demo script: report and continue to exit.
        print(f"❌ 错误: {e}")
        import traceback
        traceback.print_exc()

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()