# 构建RAG对话应用

import os

from langchain.chains.combine_documents import create_stuff_documents_chain
import bs4
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain.chains.retrieval import create_retrieval_chain
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_deepseek import ChatDeepSeek
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.chat_message_histories import ChatMessageHistory

# Local clash proxy so dependency downloads (e.g. chroma) succeed; remove or
# disable if no proxy is running. NOTE: proxy URLs need an explicit scheme —
# some HTTP clients reject bare "host:port" values.
os.environ['http_proxy'] = 'http://127.0.0.1:7890'
os.environ['https_proxy'] = 'http://127.0.0.1:7890'

# LangSmith tracing. The flag LangChain actually reads is LANGCHAIN_TRACING_V2;
# the original "LANGCHAIN_TRACING_V3" is silently ignored and tracing stays off.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# SECURITY: hard-coded credentials below are committed to source control —
# they should be moved to environment variables / a secrets manager and rotated.
os.environ["LANGCHAIN_API_KEY"] = "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"
os.environ["LANGCHAIN_PROJECT"] = "langchain-community-demo"
os.environ["TAVILY_API_KEY"] = "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"
os.environ["OPENAI_API_KEY"] = "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"

# DeepSeek API key used by ChatDeepSeek below.
deepseek_api_key = "sk-1dd16a258a73428d910d38c782e1c94f"

# 1. Build the chat model backed by the DeepSeek API.
#    ("deepseek-reasoner" would select DeepSeek-R1 instead of the chat model.)
model = ChatDeepSeek(
    model="deepseek-chat",
    api_key=deepseek_api_key,
    max_tokens=1024,
    temperature=0.7,
)

# Load the source page, keeping only elements whose class is "content".
# NOTE(review): the '' entry also matches tags with an empty class attribute —
# confirm that is intentional.
page_url = 'https://docs.spring.io/spring-cloud-gateway/reference/spring-cloud-gateway/global-filters.html'
loader = WebBaseLoader(
    web_paths=[page_url],
    bs_kwargs={'parse_only': bs4.SoupStrainer(class_=('content', ''))},
)
# 1. Load the documents (performs an HTTP GET).
docs = loader.load()
# 2. Split — break the large document into 1000-char chunks that overlap by
#    200 chars so context isn't lost at chunk boundaries.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
)
splits = splitter.split_documents(docs)

# 3. Embed the chunks and store them in Chroma.
# BUG FIX: "deepseek-chat" is a DeepSeek cloud chat model, not a model served
# by a local Ollama instance, so OllamaEmbeddings(model='deepseek-chat') would
# fail at embedding time. Use a real Ollama embedding model instead
# (run `ollama pull nomic-embed-text` first).
# vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())
vectorstore = Chroma.from_documents(documents=splits, embedding=OllamaEmbeddings(model='nomic-embed-text'))
# huggingface_embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# vectorstore = Chroma.from_documents(documents=splits, embedding=HuggingFaceEmbeddings(model_name="BAAI/bge-small-zh-v1.5"))

# 4. Build a retriever over the vector store.
retriever = vectorstore.as_retriever()

# 5. Assemble the answering chain.

# Answering prompt. create_stuff_documents_chain requires a "{context}"
# variable, and create_retrieval_chain feeds the user question in under the
# "input" key. BUG FIX: the original used "{question}", a variable the
# retrieval chain never populates, which raises KeyError at runtime; it was
# also duplicated in both the system and human messages.
system_prompt = """
You are a helpful assistant.
{context}
Given the above documents, use the following pieces of context to answer the users question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
"""

prompt_template = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    MessagesPlaceholder("chat_history"),
    ("human", "{input}"),
])

# Combine model + prompt: stuffs all retrieved documents into {context}.
chain1 = create_stuff_documents_chain(model, prompt_template)
# chain2 = create_retrieval_chain(retriever, chain1)

# resp = chain2.invoke({"input": "how many spring cloud gateway global filter ?"})
#
# print(resp)

# NOTE:
# Normally a chain carries conversational context purely through the message
# history placeholder in the prompt, but in this case the *retriever query*
# itself also needs the conversation to be understood.
#
# Solution: add a sub-chain (a chain nested inside the larger chain) that takes
# the latest user question plus the chat history and rephrases the question
# whenever it refers to earlier turns — i.e. build a "history-aware" retriever.
# Purpose of the sub-chain: make the retrieval step conversation-aware.

# Sub-chain prompt (question contextualization).
# BUG FIX: the original prompt asked the model to *answer* the question and
# interpolated {chat_history} in the system text on top of the
# MessagesPlaceholder; a history-aware retriever's prompt must instead
# reformulate the question so it stands alone. It also must expose an
# "{input}" variable — create_history_aware_retriever raises a ValueError if
# "input" is missing, so "{question}" fails at construction time.
contextualize_q_system_prompt = """Given a chat history and the latest user \
question which might reference context in the chat history, formulate a \
standalone question which can be understood without the chat history. \
Do NOT answer the question, just reformulate it if needed and otherwise \
return it as is."""

retriever_history_prompt_template = ChatPromptTemplate.from_messages([
    ("system", contextualize_q_system_prompt),
    # placeholder key must match history_messages_key used further down
    MessagesPlaceholder("chat_history"),
    ("human", "{input}"),
])


# Create the sub-chain: rephrase the question using history, then retrieve.
history_chain = create_history_aware_retriever(model, retriever, retriever_history_prompt_template)

# Per-session Q&A histories. key: session_id, value: ChatMessageHistory object.
chat_history = {}


def get_session_history(session_id):
    """Return the ChatMessageHistory for *session_id*, creating it on first use."""
    try:
        return chat_history[session_id]
    except KeyError:
        history = ChatMessageHistory()
        chat_history[session_id] = history
        return history


# Wire the history-aware retriever into the combine-documents chain.
# create_retrieval_chain reads the question from the "input" key and returns a
# dict containing "answer". BUG FIX: the original passed the question under a
# "question" key, which the chain does not recognise (KeyError at runtime).
chain = create_retrieval_chain(history_chain, chain1)
# Final chain: wrap everything with automatic per-session message history.
result_chain = RunnableWithMessageHistory(
    chain,
    get_session_history,
    # key under which the user question is supplied (create_retrieval_chain's input key)
    input_messages_key='input',
    # key the prompts' MessagesPlaceholder reads the history from
    history_messages_key='chat_history',
    # key of the answer in the chain's output dict
    output_messages_key='answer'
)


# Test: first round of Q&A
resp = result_chain.invoke(
    {'input': 'how many spring cloud gateway global filter ?'},
    config={
        # session id identifying this conversation
        'configurable': {'session_id': 'ceshi123'}
    }
)
print(resp['answer'])