# Feed each document (or document chunk) to the LLM, score the generated answer
# for each chunk, and return the answer from the highest-scoring chunk (map_rerank).
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain_community.chat_models import ChatOpenAI
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import CharacterTextSplitter
import __init__

# Shared chat model: gpt-3.5-turbo with deterministic sampling (temperature=0).
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

# Load the PDF, then cut it into ~500-token chunks (no overlap) so each chunk
# can be scored independently by the map_rerank chain below.
docs = PyPDFLoader("../kecheng源码/loader.pdf").load()
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=500, chunk_overlap=0)
split_docs = text_splitter.split_documents(docs)

# Build a map_rerank QA-with-sources chain: each chunk is answered and scored
# independently; the highest-scoring answer is returned, along with the chunk's
# source metadata and the per-chunk intermediate steps.
# Reuse the module-level `llm` instead of constructing a second, identically
# configured ChatOpenAI(temperature=0) inline.
chain = load_qa_with_sources_chain(
    llm,
    chain_type="map_rerank",
    metadata_keys=['source'],
    return_intermediate_steps=True,
)
print(chain)
print("----------------------------")

query = "what is this document talk about?answer by chinese"
# `.invoke(...)` is the current entry point; calling the chain directly
# (`chain({...})`) is the deprecated `__call__` style.
result = chain.invoke({"input_documents": split_docs, "question": query})
print(result)
