import os

from langchain.chains.summarize import load_summarize_chain
from langchain_community.document_loaders import WebBaseLoader
from langchain_openai import ChatOpenAI
from langchain_text_splitters import CharacterTextSplitter
from pydantic import SecretStr

# Read the API key for Alibaba Cloud DashScope from the environment.
# Fail fast with a clear message if it is missing or empty.
api_key = os.getenv("DASHSCOPE_API_KEY")
if api_key is None or api_key == "":
    raise ValueError("请设置环境变量DASHSCOPE_API_KEY（阿里云百炼API-KEY）")

# Instantiate the chat model against DashScope's OpenAI-compatible endpoint.
# Collect the settings in one mapping, then unpack them into the constructor.
_model_settings = dict(
    model="qwen-plus-latest",
    temperature=0.5,
    max_tokens=None,   # no explicit completion-length cap
    timeout=None,      # rely on the client library's default timeout
    max_retries=2,
    api_key=SecretStr(api_key),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)
model = ChatOpenAI(**_model_settings)

# Load the source document: fetch Lilian Weng's blog post over HTTP
# using LangChain's WebBaseLoader.
_blog_url = "https://lilianweng.github.io/posts/2023-06-23-agent/"
loader = WebBaseLoader(_blog_url)
docs = loader.load()  # list of Document objects (one per page)

# Refine strategy:
# Unlike map-reduce, RefineDocumentsChain works sequentially. It loops over
# the input chunks and, for each one, feeds the chunk plus the latest
# intermediate summary back to the LLM to produce an updated summary.

# Step 1: split the article into token-bounded chunks
# (tiktoken-based counting, no overlap between neighbouring chunks).
splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=1024, chunk_overlap=0)
split_docs = splitter.split_documents(docs)

# Step 2: build the refine-style summarization chain and run it.
chain = load_summarize_chain(model, chain_type='refine')

summary = chain.invoke(split_docs)
print(summary['output_text'])
