from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from langchain_text_splitters import CharacterTextSplitter
import __init__

# --- Document ingestion --------------------------------------------------
# Read the source PDF; PyPDFLoader yields one Document per page.
pdf_loader = PyPDFLoader("../kecheng源码/loader.pdf")
pages = pdf_loader.load()

# Re-chunk the pages with a tiktoken-based character splitter so each
# piece fits comfortably into the model context for the map step.
splitter = CharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=1000,
    chunk_overlap=0,
)
split_docs = splitter.split_documents(pages)

# --- Map step ------------------------------------------------------------
# Prompt that condenses one document chunk; the chunk text is injected
# through the {content} variable.
per_chunk_template = """对以下文字做简要的总结：
"{content}"
简要的总结："""
per_chunk_prompt = PromptTemplate.from_template(per_chunk_template)

# Deterministic model (temperature 0) shared by the map and reduce steps.
llm = ChatOpenAI(
    model="gpt-3.5-turbo",
    temperature=0,
)

# One LLM call per chunk.
map_chain = LLMChain(prompt=per_chunk_prompt, llm=llm)
print(map_chain)

# --- Reduce step ---------------------------------------------------------
# Prompt that merges the per-chunk summaries into one final summary.
# NOTE: fixed a typo in the original template ("一下" -> "以下",
# i.e. "the following is a collection of summaries").
reduce_template = """以下是一个摘要集合：
{doc_summaries}
将上述摘要与所有关键细节进行总结。
总结："""
reduce_prompt = PromptTemplate.from_template(reduce_template)
reduct_chain = LLMChain(
    prompt=reduce_prompt,
    llm=llm,
)
# Stuffs all intermediate summaries into a single prompt, exposed to the
# template as the "doc_summaries" variable.
stuff_chain = StuffDocumentsChain(
    llm_chain=reduct_chain,
    document_variable_name="doc_summaries",
)
reduct_final_chain = ReduceDocumentsChain(
    combine_documents_chain=stuff_chain,
    # If the collected summaries exceed token_max, they are first
    # collapsed (recursively) through collapse_documents_chain before
    # the final combine pass runs.
    collapse_documents_chain=stuff_chain,
    token_max=4000,
)

# --- Map-reduce orchestration --------------------------------------------
# Wire the two halves together: every chunk is summarized by map_chain
# (fed in as {content}), and the partial summaries are then merged by
# the reduce pipeline built above.
summarization_chain = MapReduceDocumentsChain(
    reduce_documents_chain=reduct_final_chain,
    llm_chain=map_chain,
    document_variable_name="content",
)

final_summary = summarization_chain.run(split_docs)
print("----------------------------------------")
print(final_summary)
