import getpass
import os

from langchain import hub
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.retrieval import create_retrieval_chain
from langchain_chroma import Chroma
from langchain_community.document_loaders import WebBaseLoader
import bs4
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter

# bs4: BeautifulSoup library, used for HTML parsing
#
# hub: LangChain hub module, used to fetch predefined prompt templates
#
# Chroma: vector database for storing and querying document embedding vectors
#
# WebBaseLoader: web page document loader
#
# StrOutputParser: parses model output into a plain string
#
# RunnablePassthrough: passes input data through unchanged
#
# OpenAIEmbeddings: OpenAI text-embedding model
#
# RecursiveCharacterTextSplitter: recursive character-based text splitter



# LangSmith tracing configuration.
#
# SECURITY NOTE(review): API keys were hardcoded here and unconditionally
# overwrote any environment-provided values. They are now only fallbacks via
# setdefault/get; the literal keys below are exposed in source control and
# must be rotated and removed (e.g. prompt with getpass.getpass() instead).
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ.setdefault(
    "LANGCHAIN_API_KEY",
    "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",  # TODO: rotate & remove
)

# Chat model served through an OpenAI-compatible proxy endpoint.
llm = ChatOpenAI(
    api_key=os.environ.get(
        "OPENAI_API_KEY",
        "sk-CftUbVSsA61lwwgMz9xvt6znTunQZfgBP8ZCVLbQsKfXUR6k",  # TODO: rotate & remove
    ),
    model='deepseek-ai/DeepSeek-V3',
    base_url="https://www.henapi.top/v1"
)

# Fetch the blog post; SoupStrainer restricts parsing to the post title,
# header, and body elements so page boilerplate (nav, footer) is excluded.
loader = WebBaseLoader(
    web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
    bs_kwargs={
        "parse_only": bs4.SoupStrainer(
            class_=("post-content", "post-title", "post-header")
        )
    },
)
# Load the page, then split it into 1000-character chunks with a
# 200-character overlap so each chunk retains some surrounding context.
documents = loader.load()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = splitter.split_documents(documents)
# Embed each chunk with OpenAI's text-embedding-3-small and index the vectors
# in a Chroma store.
#
# SECURITY NOTE(review): the API key was hardcoded; it is now only a fallback
# when OPENAI_API_KEY is unset. The literal key is exposed in source control
# and must be rotated and removed.
embeddings = OpenAIEmbeddings(
    api_key=os.environ.get(
        "OPENAI_API_KEY",
        "sk-CftUbVSsA61lwwgMz9xvt6znTunQZfgBP8ZCVLbQsKfXUR6k",  # TODO: rotate & remove
    ),
    model='text-embedding-3-small',
    base_url="https://www.henapi.top/v1",
)
vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
# Retriever over the vector store for similarity search.
# NOTE(review): name is misspelled ("retriver") but kept because later code
# in this file references it.
retriver = vectorstore.as_retriever()
# NOTE(review): the original code pulled the "rlm/rag-prompt" template from the
# LangChain hub here, but the result was never used — `prompt` is
# unconditionally reassigned below before the first chain is built — so the
# dead (network) call has been removed. Restore
# `prompt = hub.pull("rlm/rag-prompt")` if the commented-out LCEL chain below
# is revived.
# 定义函数，将检索到的多个文档合并为一个字符串，用双换行符分隔
def format_docs(docs):
    """Merge retrieved documents into one context string.

    Each document's page_content is concatenated, with a blank line
    between consecutive documents.
    """
    pieces = [document.page_content for document in docs]
    return "\n\n".join(pieces)


# rag_chain=(
#     {"context":retriver|format_docs,"question":RunnablePassthrough()}
#     |prompt
#     |llm
#     |StrOutputParser()
# )

# resp=rag_chain.invoke("What is Task Decomposition?")
# print(resp)

# for chunk in rag_chain.stream("What is Task Decomposition?"):
#     print(chunk,end="",flush=True)

# Built-in chain approach: a concise QA system prompt with a {context} slot
# that the stuff-documents chain fills with the retrieved chunks.
system_prompt = (
    "You are an assistant for question-answering tasks. Use the following "
    "pieces of retrieved context to answer "
    "the question. If you don't know "
    "the answer, say that you "
    "don't know. Use three sentences maximum and keep the "
    "answer concise."
    "\n\n{context}"
)

# Compose the built-in chains: "stuff" every retrieved document into the
# prompt's {context} slot, then wrap that QA chain with the retriever so a
# plain {"input": question} dict drives the whole pipeline.
prompt = ChatPromptTemplate.from_messages(
    [("system", system_prompt), ("human", "{input}")]
)
combine_docs_chain = create_stuff_documents_chain(llm, prompt)
rag_chain = create_retrieval_chain(retriver, combine_docs_chain)

response = rag_chain.invoke({"input": "What is Task Decomposition?"})
print(response["answer"])
