import os
from langchain_community.chat_models import QianfanChatEndpoint
from langchain.chains import ConversationChain, LLMChain, ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory, ConversationSummaryMemory
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# SECURITY: API credentials are hard-coded in this file — anyone who can read
# the source can use (and abuse) this Qianfan account. These keys should be
# rotated and supplied via the real environment or a secrets manager rather
# than committed to source control.
# `setdefault` (instead of plain assignment) keeps any credentials already
# present in the process environment, falling back to the embedded values
# only when none are configured.
os.environ.setdefault("QIANFAN_AK", "7ZITkqmufR2g6MSLQUqsYAXB")
os.environ.setdefault("QIANFAN_SK", "n2YLOoakGri5LfsK8Fxsax86tdeLEfpm")
# --- Ingestion: fetch the blog post, chunk it, and index it. ---
# Download the article from Zhihu.
blog_documents = WebBaseLoader("https://zhuanlan.zhihu.com/p/85289282").load()
# Split the page into 500-character chunks (no overlap between chunks).
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = splitter.split_documents(blog_documents)
# Embed every chunk with Qianfan embeddings and persist them in a Chroma store.
vectorstore = Chroma.from_documents(
    documents=all_splits,
    embedding=QianfanEmbeddingsEndpoint(),
)
# --- Chain setup: retriever, chat model, summarizing memory, QA chain. ---
retriever = vectorstore.as_retriever()
# ERNIE-Bot-4 via Qianfan; temperature 0.8 allows fairly creative answers.
chat = QianfanChatEndpoint(model='ERNIE-Bot-4', temperature=0.8)
# ConversationSummaryMemory condenses prior turns with the same LLM instead
# of replaying the full transcript on every call.
memory = ConversationSummaryMemory(
    llm=chat,
    memory_key="chat_history",
    return_messages=True,
)
# Retrieval-augmented conversational chain: answers are grounded in the
# indexed blog chunks while the summary memory tracks the dialogue.
qa = ConversationalRetrievalChain.from_llm(
    llm=chat,
    retriever=retriever,
    memory=memory,
)
# Ask one question through the chain and print the grounded answer.
question = {"question": "请帮我出3个明朝的题目并给出答案"}
res = qa.invoke(question)
print(res["answer"])