"""
RAG联系，通过读取文件内容，作为外部资源合并到查询里面。
"""
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate, format_document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_community.vectorstores.faiss import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from customize.get_ollama import GetOllama
from langchain_community.document_loaders import PyPDFLoader
# Load the source document (a local PDF); an arXiv loader was used previously:
# loader = ArxivLoader(query="2210.03629", load_max_docs=1)
loader = PyPDFLoader("resources/langchain_rag_sys.pdf")
docs = loader.load()

# Split the text into small overlapping chunks for embedding.
# NOTE(review): chunk_size=100 characters is quite small — confirm this is intended.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
chunks = text_splitter.split_documents(docs)

# Build the FAISS vector store and its retriever.
# GetOllama(..., model_type=2)() presumably returns an embeddings object — verify against customize.get_ollama.
vs = FAISS.from_documents(chunks, GetOllama(model_name="EntropyYue/chatglm3:6b", model_type=2)())
# print(vs.similarity_search("What is ReAct"))
retriever = vs.as_retriever()

# Prompt used to render each retrieved Document as a plain-text paragraph.
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")


def _combine_documents(docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"):
    """Render each Document through *document_prompt* and join the pieces.

    Used as the "context" branch of the RAG chain: turns the retriever's
    list of Documents into a single string separated by *document_separator*.
    """
    return document_separator.join(
        format_document(doc, document_prompt) for doc in docs
    )


# Prepare the Model I/O triple: prompt template -> chat model (-> parser below).
template = """仅根据以下上下文回答问题:{context},
        问题:{question}
        """
# English version of the same prompt, kept for reference:
# template = """Answer the question based only on the following context:{context}
#         Question:{question}
#         """

prompt = ChatPromptTemplate.from_template(template)
# NOTE(review): num_ctx=131042 looks like a typo for 131072 (128K) — confirm.
# GetOllama(..., model_type=1)() presumably returns a chat model — verify against customize.get_ollama.
model = GetOllama(model_name="EntropyYue/chatglm3:6b", model_type=1, num_ctx=131042)()

# LCEL pipeline: the dict maps the user's question into two parallel inputs —
# "context" (retrieved Documents flattened to text) and "question" (passed
# through unchanged) — then formats the prompt, calls the model, and parses
# the reply to a plain string.
chain = (
    {
        "context": retriever | _combine_documents,
        "question": RunnablePassthrough()
    }
    | prompt
    | model
    | StrOutputParser()
)
# Interactive REPL: keep answering questions until the user types "exit".
# The walrus operator tests the sentinel once per iteration (the original
# checked it twice); .strip() lets "exit " with stray whitespace still quit.
while (chat_str := input("请输入问题：").strip()) != "exit":
    result = chain.invoke(chat_str)
    print(result)