'''定义 RefineDocumentsChain 来形成文档的摘要。使用Refine模式侧重于通过
删除不必要或多余的信息来细化和改进文档摘要。
它包括对句子进行编辑和改写，使其更加简明。
其目的是创建一个摘要，准确地捕捉文档的主要思想，同时消除任何不必要的细节。
'''
from functools import partial
from operator import itemgetter

from langchain.callbacks.manager import trace_as_chain_group
from langchain_community.document_loaders import WebBaseLoader
# from langchain.chat_models import ChatAnthropic
from langchain_community.llms import Ollama
from langchain.prompts import PromptTemplate
from langchain.schema import StrOutputParser
from langchain_core.prompts import format_document

import bs4

# Load the Qwen 0.5B model served by a local Ollama instance
llm = Ollama(model="qwen:0.5b")

# Prompt used to produce the initial summary of the first document
first_prompt = PromptTemplate.from_template("Summarize this content:\n\n{context}")
# first_prompt = PromptTemplate.from_template("你现在是一个资深编辑，请总结以下内容:\n\n{context}")
# Renders a Document to plain text by emitting only its page_content
document_prompt = PromptTemplate.from_template("{page_content}")
partial_format_doc = partial(format_document, prompt=document_prompt)
# First-pass summarization chain: format document -> prompt -> LLM -> plain string
summary_chain = {"context": partial_format_doc} | first_prompt | llm | StrOutputParser()

# Prompt that folds additional context into the previous summary (refine step)
refine_prompt = PromptTemplate.from_template(
    "Here's your first summary: {prev_response}. "
    "Now add to it based on the following context: {context}"
)

# refine_prompt = PromptTemplate.from_template(
#     "这是你的第一个总结: {prev_response}. "
#     "现在将以下上下文进行总结并添加到总结中: {context}"
# )
# Refine chain: expects {"prev_response": str, "doc": Document} and returns
# an updated summary string
refine_chain = (
        {
            "prev_response": itemgetter("prev_response"),
            "context": lambda x: partial_format_doc(x["doc"]),
        }
        | refine_prompt
        | llm
        | StrOutputParser()
)

def refine_loop(docs):
    """Summarize *docs* with the refine pattern.

    Seeds a summary from the first document via ``summary_chain``, then folds
    each remaining document into it via ``refine_chain``. All invocations are
    traced under a single "refine loop" chain group. Returns the final summary
    string.
    """
    with trace_as_chain_group("refine loop", inputs={"input": docs}) as manager:
        # Initial pass: summarize the first document on its own.
        running_summary = summary_chain.invoke(
            docs[0], config={"callbacks": manager, "run_name": "initial summary"}
        )
        # Refine passes: merge each subsequent document into the summary.
        for step, document in enumerate(docs[1:]):
            running_summary = refine_chain.invoke(
                {"prev_response": running_summary, "doc": document},
                config={"callbacks": manager, "run_name": f"refine {step}"},
            )
        manager.on_chain_end({"output": running_summary})
    return running_summary

def getWeChatOfficialAccountMessage(url):
    """Fetch a WeChat Official Account article and return it as LangChain documents.

    Only the title and article-body HTML nodes ("rich_media_title",
    "rich_media_wrp") are parsed, which strips navigation and boilerplate.

    :param url: full URL of the WeChat article.
    :return: list of Document objects produced by WebBaseLoader.load().
    """
    loader = WebBaseLoader(web_path=(url,),
                           bs_kwargs=dict(
                               parse_only=bs4.SoupStrainer(
                                   class_=("rich_media_title", "rich_media_wrp")
                               )
                           ),
                           )
    # Bug fix: WebBaseLoader issues requests through loader.session, so custom
    # headers must be set on the session. Assigning to a bare `loader.headers`
    # attribute is never read by the loader and the User-Agent was ignored.
    loader.session.headers.update({
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
        "Content-Type": "text/html; charset=UTF-8"})

    # Force UTF-8 decoding of the response body
    loader.encoding = 'utf-8'
    # Download and parse the page content
    content = loader.load()
    return content

# Target WeChat article to summarize (alternate test URL kept below)
url = "https://mp.weixin.qq.com/s/tn8o1HIy4tHahOWUvEF1eA"
# url = "https://mp.weixin.qq.com/s/mvUVAeXOEVbTMY56eUDzsA"
docs = getWeChatOfficialAccountMessage(url)

from langchain.schema import Document

# The loader returns a list of Document objects; merge their bodies first.
def format_docs(docs):
    """Join the page_content of each document with blank-line separators."""
    bodies = [doc.page_content for doc in docs]
    return "\n\n".join(bodies)
# Flatten the fetched documents into one text blob
text = format_docs(docs)

# Re-split on blank lines, wrapping each chunk as its own Document so the
# refine loop can process the article piece by piece
docs = [
    Document(
        page_content=split,
        metadata={"source": url},
    )
    for split in text.split("\n\n")
]
# WeChat pages contain long runs of consecutive newlines, so the split above
# produces many empty or newline-only chunks; drop those before summarizing
clean_docs = list(filter(lambda x: x.page_content is not None and x.page_content != '' and x.page_content != '\n', docs))


# Run the refine loop and print the final summary to the console
print(refine_loop(clean_docs))