import os

from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.summarize import load_summarize_chain
from langchain_community.document_loaders import WebBaseLoader
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
from langchain_text_splitters import CharacterTextSplitter
from pydantic import BaseModel, Field

# Enable LangSmith tracing so chain runs can be inspected for debugging.
# NOTE(review): the API key is hard-coded (and looks like a placeholder) —
# load it from the environment or a secrets manager instead of committing it.
os.environ['LANGCHAIN_TRACING_V2'] = "true"
os.environ['LANGCHAIN_API_KEY'] = '1123'

# Instantiate the chat model; temperature=0 for deterministic summaries.

model = ChatOpenAI(model='gpt-4-turbo', temperature=0)

# --- Approach 1: "stuff" summarization via load_summarize_chain -------------
# The entire document is stuffed into a single prompt and summarized at once.
# NOTE(review): 'https://' is a placeholder URL — supply a real page before
# running, otherwise the loader has nothing to fetch.
loader = WebBaseLoader('https://')
docs = loader.load()  # the whole article as a list of Document objects

chain = load_summarize_chain(model, chain_type='stuff')

result = chain.invoke(docs)

# --- Approach 2: "stuff" summarization built by hand -------------------------
# Same strategy as Approach 1, but assembled from LLMChain +
# StuffDocumentsChain so the summary prompt can be customized.
prompt_template = """
针对以下内容，写一个简洁的总结摘要：
{text}
简洁的总结摘要：
"""
prompt = PromptTemplate.from_template(prompt_template)

llm_chain = LLMChain(llm=model, prompt=prompt)

# 'text' must match the {text} placeholder in the prompt template above.
stuff_chain = StuffDocumentsChain(llm_chain=llm_chain, document_variable_name='text')

# BUG FIX: the original invoked `chain` (the Approach-1 chain) here, so the
# hand-built stuff_chain was never exercised. Invoke stuff_chain instead.
result = stuff_chain.invoke(docs)

print(result['output_text'])

# --- Approach 3: map-reduce summarization ------------------------------------
# Step 1: split the large document into ~1000-token chunks.
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=1000, chunk_overlap=0)
split_docs = text_splitter.split_documents(docs)

# Step 2: "map" stage — summarize each chunk independently.
# BUG FIX: the original template contained a stray {documents} placeholder in
# addition to {docs}. MapReduceDocumentsChain only supplies the variable named
# by document_variable_name ('docs'), so the extra input key would raise a
# missing-input error at invoke time.
map_template = """
以下是一组文档：
"{docs}"
根据这个文档列表，请给出总结摘要：
"""

map_prompt = PromptTemplate.from_template(map_template)

map_llm_chain = LLMChain(llm=model, prompt=map_prompt)

# Step 3: "reduce" stage — combine the per-chunk summaries into one.
reduce_template = """以下是一组总结摘要：
{docs}
将这些内容提炼成一个最终的，统一的总结摘要：
"""

reduce_prompt = PromptTemplate.from_template(reduce_template)
reduce_llm_chain = LLMChain(llm=model, prompt=reduce_prompt)

# Reduce strategy: if the accumulated mapped summaries exceed token_max
# (4000 tokens), they are recursively collapsed in <=4000-token batches via
# collapse_documents_chain; once the total fits within token_max, everything
# is handed to combine_documents_chain to produce the final summary.

combine_chain = StuffDocumentsChain(llm_chain=reduce_llm_chain, document_variable_name='docs')

reduce_chain = ReduceDocumentsChain(
    # chain that produces the final combined summary
    combine_documents_chain=combine_chain,
    # chain used to collapse intermediate batches that exceed token_max
    collapse_documents_chain=combine_chain,
    token_max=4000
)

# Wire the map and reduce stages together; 'docs' matches the {docs}
# placeholder used in both prompt templates above.
map_reduce_chain = MapReduceDocumentsChain(
    llm_chain=map_llm_chain,
    reduce_documents_chain=reduce_chain,
    document_variable_name='docs',
    return_intermediate_steps=False
)

result = map_reduce_chain.invoke(split_docs)
print(result['output_text'])

# --- Approach 4: "refine" summarization --------------------------------------
# Similar to map-reduce, but sequential: the chain loops over the chunks and
# incrementally updates its answer — each step feeds the current chunk plus
# the latest intermediate summary back to the LLM.

# Split the source document into ~1000-token chunks for refinement.
refine_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=1000, chunk_overlap=0)
refine_docs = refine_splitter.split_documents(docs)

# Build the refine-style summarize chain and run it over the chunks.
chain = load_summarize_chain(model, chain_type='refine')
result = chain.invoke(refine_docs)
