import os

import bs4
from dotenv import load_dotenv
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.retrieval import create_retrieval_chain
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain_core.prompts import MessagesPlaceholder
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_chroma import Chroma
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_openai import ChatOpenAI


# Load environment variables (e.g. DASHSCOPE_API_KEY) from a local .env file.
load_dotenv()

# 1. Create the chat model. Qwen is served through DashScope's
# OpenAI-compatible endpoint, so the standard ChatOpenAI client works.
model = ChatOpenAI(
    model='qwen-plus',
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# 2. Load the source data with a DocumentLoader. The SoupStrainer restricts
# parsing to the post header, title and content elements of the blog page,
# so navigation and boilerplate are excluded from the indexed text.
loader = WebBaseLoader(
    web_paths=['https://lilianweng.github.io/posts/2023-06-23-agent/'],
    bs_kwargs=dict(
        parse_only=bs4.SoupStrainer(class_=('post-header', 'post-title', 'post-content'))
    )
)

docs = loader.load()
# Debug output: inspect the raw loaded documents.
print(docs)

# 3. Split: text splitters break large documents into smaller chunks. This
# helps both indexing and generation — large chunks are harder to search
# over and may not fit into the model's limited context window.
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = splitter.split_documents(docs)

# 4. Store: embed each chunk with DashScope embeddings (reads
# DASHSCOPE_API_KEY from the environment) and index them in Chroma.
vector_store = Chroma.from_documents(documents=splits, embedding=DashScopeEmbeddings())

# 5. Retriever: expose the vector store through the standard retriever interface.
retriever = vector_store.as_retriever()

# 6. Combine: build the question-answering prompt. {context} is filled by the
# stuff-documents chain with the retrieved chunks; {input} is the user's
# question; 'chat_history' receives prior turns of the conversation.
system_prompt = """You are an assistant for question-answering tasks. 
Use the following pieces of retrieved context to answer the question. 
If you don't know the answer, say that you
don't know. Use three sentences maximum and keep the answer concise.\n

{context}
"""

# Built with from_messages for consistency with the retriever prompt below.
prompt = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    MessagesPlaceholder('chat_history'),  # prior question/answer turns
    ("human", "{input}"),
])

# chain1 "stuffs" all retrieved documents into {context} and calls the model.
chain1 = create_stuff_documents_chain(llm=model, prompt=prompt)

# NOTE: the chain built so far would pass only the raw question to the
# retriever, but follow-up questions ("What are common ways of doing it?")
# need the conversation context to be understood. Fix: add a sub-chain that
# takes the latest user question plus the chat history and, when the question
# references earlier turns, rewrites it as a standalone question — this is
# the "history-aware" retriever. Its purpose is to make the retrieval step
# itself conversation-aware.
contextualize_q_system_prompt = """Given a chat history and the latest question 
which might reference context in the chat history,
formulate a standalone question which can be understood
without the chat history. Do NOT answer the question,
just reformulate it if needed and otherwise return it as is."""

retriever_history_prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder('chat_history'),
        ("human", "{input}")
    ]
)

# Retriever that first reformulates the question in light of the chat history.
history_chain = create_history_aware_retriever(llm=model, retriever=retriever, prompt=retriever_history_prompt_template)

# In-memory session store: maps session_id -> ChatMessageHistory so each
# conversation keeps its own question/answer record.
store = {}


def get_session_history(session_id: str):
    """Return the chat history for *session_id*, creating one on first use."""
    try:
        return store[session_id]
    except KeyError:
        history = ChatMessageHistory()
        store[session_id] = history
        return history


# 7. Parent chain: the history-aware retriever supplies documents to the
# stuff-documents QA chain (chain1).
chain = create_retrieval_chain(retriever=history_chain, combine_docs_chain=chain1)

# Wrap the chain so that each session's turns are recorded via
# get_session_history and re-injected into the 'chat_history' placeholder.
result_chain = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key='input',  # dict key that carries the user's question
    history_messages_key='chat_history',  # prompt placeholder fed with past turns
    output_messages_key='answer'  # dict key whose value is stored as the reply
)


# First turn: answered from retrieved context; recorded under this session id.
resp1 = result_chain.invoke(
    {'input': 'What is task decomposition?'},
    config={'configurable': {'session_id': 'lll123456'}}
)
print(resp1['answer'])


# Second turn: same session, so "it" can be resolved against the first turn
# by the history-aware retriever.
resp2 = result_chain.invoke(
    {'input': 'What are common ways of doing it?'},
    config={'configurable': {'session_id': 'lll123456'}}
)
print(resp2['answer'])

