import bs4
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_ollama import ChatOllama, OllamaEmbeddings
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# Keep only the post title, section headers, and body content from the full
# HTML; everything else on the page is discarded at parse time.
bs4_strainer = bs4.SoupStrainer(
    class_=("post-title", "post-header", "post-content")
)

# Without streaming, the download can abort with:
# urllib3.exceptions.ProtocolError: ('Connection broken: IncompleteRead(49152 bytes read, 76161 more expected)', IncompleteRead(49152 bytes read, 76161 more expected))
# Workaround: pass requests_kwargs={"stream": True} so the body is fetched incrementally.
loader = WebBaseLoader(
    web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
    bs_kwargs={"parse_only": bs4_strainer},
    requests_kwargs={"stream": True}
)
# Download and parse the page into a list of Documents (one per web path).
docs = loader.load()

# print(docs)
# print(docs[0].page_content)

# Chunk the fetched documents for embedding: ~1000-character pieces with a
# 200-character overlap; each chunk records its start offset in its metadata.
text_splitter = RecursiveCharacterTextSplitter(
    add_start_index=True,
    chunk_overlap=200,
    chunk_size=1000,
)
all_splits = text_splitter.split_documents(docs)

# print(len(all_splits))

# Embed every chunk with a local Ollama embedding model and index the
# vectors in a Chroma collection.
embedder = OllamaEmbeddings(model="mxbai-embed-large:latest")
vectorstore = Chroma.from_documents(documents=all_splits, embedding=embedder)

# Expose the store as a retriever returning the 6 most similar chunks.
retriever = vectorstore.as_retriever(
    search_kwargs={"k": 6},
    search_type="similarity",
)

# Smoke-test the retriever with a sample question.
# Fixed typo in the query ("Decompositoin" -> "Decomposition") so it matches
# the correctly spelled question used by the chain below; a misspelled query
# embeds differently and can degrade retrieval quality.
retrieved_docs = retriever.invoke("What are the approaches to Task Decomposition?")
# print(len(retrieved_docs))
# print(retrieved_docs[0].page_content)

# Local chat model served by Ollama; used as the generator in the RAG chain.
model = ChatOllama(model="glm4")

# Pull a community RAG prompt template ("context" + "question" placeholders)
# from the LangChain Hub.
prompt = hub.pull("rlm/rag-prompt")

#example_messages = prompt.invoke(
#    {"context": "filter context", "question": "filter question"}
#).to_messages()

# print(example_messages)
# print(example_messages[0].content)

def format_docs(docs):
    """Join the page contents of *docs* into one string, blank-line separated."""
    contents = [doc.page_content for doc in docs]
    return "\n\n".join(contents)

# Wire the RAG pipeline with LCEL: the retriever output is collapsed into a
# context string, the raw question passes through unchanged, and the model's
# reply is parsed down to a plain string.
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

# Stream the generated answer to stdout as it is produced.
question = "What is Task Decomposition?"
for piece in rag_chain.stream(question):
    print(piece, end="", flush=True)
