import os  # 用于访问环境变量和操作系统功能

import bs4
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain.chains.retrieval import create_retrieval_chain
from langchain_chroma import Chroma
from langchain_community.chat_message_histories import ChatMessageHistory  # 引入历史聊天对象
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pydantic import SecretStr

# Read the DashScope (Alibaba Cloud Model Studio) API key from the environment.
api_key = os.getenv("DASHSCOPE_API_KEY")
# Fail fast with a clear error when the API key is missing.
if not api_key:
    raise ValueError("请设置环境变量DASHSCOPE_API_KEY（阿里云百炼API-KEY）")
# 1. Create the chat model instance via Alibaba Cloud Model Studio's
#    OpenAI-compatible endpoint (serving the Qwen model family).
model = ChatOpenAI(
    model="qwen-plus-latest",  # model name; must match one supported by the platform
    temperature=0,  # sampling randomness (0 = deterministic, 1 = maximally random)
    max_tokens=None,  # max tokens to generate (None = use the model's default)
    timeout=None,  # request timeout in seconds (None = no limit)
    max_retries=2,  # max retries when a call fails
    api_key=SecretStr(api_key),  # pass the API key wrapped as a secret string
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",  # OpenAI-compatible endpoint URL
)
# 1. Load the source document: https://lilianweng.github.io/posts/2023-06-23-agent/
loader = WebBaseLoader(
    web_paths=['https://lilianweng.github.io/posts/2023-06-23-agent/'],
    bs_kwargs=dict(
        # Only parse the post header, title and body; skip navigation/footer noise.
        parse_only=bs4.SoupStrainer(
            class_=('post-header', 'post-title', 'post-content'))),
    # Browser-like User-Agent so the request is not rejected as a bot.
    header_template={
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"}
)
docs = loader.load()

# 2. Split the large document into overlapping chunks.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=10000,  # max characters per chunk; NOTE(review): original comment said 1000 but the value is 10000 — confirm which was intended
    chunk_overlap=200,  # characters of overlap between consecutive chunks
)

splits = splitter.split_documents(docs)

# 3. Embed the chunks and store them in an (in-memory) Chroma vector store.
embedding = DashScopeEmbeddings(
    model="text-embedding-v4",  # DashScope embedding model name
)

vectorstore = Chroma.from_documents(
    documents=splits,
    embedding=embedding
)
# 4. Expose the vector store as a retriever for similarity search.
retriever = vectorstore.as_retriever()

# Answering prompt: retrieved documents are stuffed into {context} in the
# system message; prior turns arrive through the "chat_history" placeholder.
system_prompt = """You are an assistant for question-answering tasks.
Use the following pieces of retrieved context to answer the question. 
If you don't know the answer, say that you don't know.
Use three sentences maximum and keep the answer concise.\n 
{context}
"""

_qa_messages = [
    ("system", system_prompt),
    MessagesPlaceholder("chat_history"),
    ("human", "{input}"),
]
prompt = ChatPromptTemplate.from_messages(_qa_messages)

# Chain that stuffs the retrieved documents into the prompt and calls the model.
chain1 = create_stuff_documents_chain(model, prompt)

# Sub-chain: rewrite the latest user question into a standalone question
# so retrieval works without needing the chat history.
contextualize_q_system_prompt = """Given a chat history (each line is "user: ..." or "assistant: ...") and the latest user question,
formulate a standalone question which can be understood without the chat history.
Do NOT answer the question, just reformulate it if needed.
"""

retriever_history_temp = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        # History is optional: on the first turn there is nothing to condense.
        MessagesPlaceholder(variable_name="chat_history", optional=True),
        ("human", "{input}")
    ]
)

# History-aware retriever: condenses (history + question) into a standalone
# query, then runs it through the plain retriever.
history_chain = create_history_aware_retriever(model, retriever, retriever_history_temp)

# In-memory chat-history store for all users: session_id -> history object.
store = {}


def get_session_history(session_id: str):
    """Return the chat history object for *session_id*, creating it on first use.

    RunnableWithMessageHistory requires a BaseChatMessageHistory instance
    (it reads ``.messages`` and appends new turns to it), so the history
    object itself is returned. The previous version returned a list of
    formatted strings, which broke history tracking at runtime.
    """
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]


# Parent chain: the history-aware retriever feeds the QA (stuff-documents) chain.
chain = create_retrieval_chain(history_chain, chain1)

# Wrap the chain with automatic per-session chat-history management.
result_chain = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key='input',  # key of the user's message in the input dict
    # Correct keyword is history_messages_key (was misspelled 'history_message_key',
    # so the chat history was never wired into the prompt).
    history_messages_key='chat_history',
    output_messages_key='answer',  # key of the model's reply in the output dict
)
# Invocation config: session_id selects which stored history this turn uses.
chat_config = {'configurable': {'session_id': 'zs123'}}

res = result_chain.invoke(
    {'input': "What is Task Decomposition?"},
    config=chat_config
)
print(res['answer'])
