from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

import os
import sys

# Resolve the directory containing this script and its parent directory, then
# put the parent on sys.path so the sibling `my_common` module is importable
# when this file is executed directly as a script.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
print(f"current_dir: {current_dir}")
# NOTE: this insert must happen before the `my_common` import below.
sys.path.insert(0, parent_dir)


# Project-local helpers: the embedding-model identifier and a factory for a
# LangChain chat model client (both presumably defined in the parent
# directory's my_common module — not visible from this file).
from my_common import  BGE_LARGE_ZH_V1_5_MODEL, get_langchain_chat_openai


if __name__ == "__main__":
    # 1. Data
    # Load the PDF of the paper (arXiv 2402.03216 v4) sitting next to this script.
    loader = PyPDFLoader(f"{current_dir}/2402.03216v4.pdf")
    docs = loader.load()
    print(docs[0].metadata)

    # The paper is ~18 pages — far too much text for one prompt, so split it
    # into overlapping character chunks to build a retrieval corpus.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,    # maximum number of characters per returned chunk
        chunk_overlap=150,  # characters shared between adjacent chunks
    )
    corpus = splitter.split_documents(docs)

    # 2. Indexing
    # Embed every chunk and index the vectors with FAISS. The model used is
    # BGE_LARGE_ZH_V1_5_MODEL (the original comment claimed bge-base-en-v1.5,
    # which did not match the code).
    from langchain_huggingface.embeddings import HuggingFaceEmbeddings

    embedding_model = HuggingFaceEmbeddings(
        model_name=BGE_LARGE_ZH_V1_5_MODEL,
        # BGE embeddings are intended for cosine-similarity comparison, so
        # normalize them to unit length at encode time.
        encode_kwargs={"normalize_embeddings": True},
    )

    # Build a FAISS vector database from the corpus and the embedding model.
    # Import from langchain_community: the old `langchain.vectorstores` path
    # is deprecated, and this matches the loader import at the top of the file.
    from langchain_community.vectorstores import FAISS

    vectordb = FAISS.from_documents(corpus, embedding_model)

    # (optional) persist the index next to this script for later reuse
    vectordb.save_local(f"{current_dir}/vectorstore.db")

    # Expose the vector store as a retriever for the RAG chain below.
    retriever = vectordb.as_retriever()

    # 3. Retrieve and Generate
    # A minimal Q&A prompt template; adapt the wording to other use cases.
    # (The template text is part of runtime behavior and is left unchanged.)
    from langchain_core.prompts import ChatPromptTemplate

    template = """
    您是 Q&A 聊天机器人。
    仅使用给定的上下文，回答问题。

    <context>
    {context}
    </context>

    Question: {input}
    """
    prompt = ChatPromptTemplate.from_template(template)

    # Assemble the chain: the retriever fetches relevant chunks, the "stuff"
    # documents chain packs them into {context}, and the chat model answers.
    from langchain.chains.combine_documents import create_stuff_documents_chain
    from langchain.chains import create_retrieval_chain

    openai = get_langchain_chat_openai()
    doc_chain = create_stuff_documents_chain(openai, prompt)
    chain = create_retrieval_chain(retriever, doc_chain)

    # The embedding model is multilingual, so a Chinese question can retrieve
    # from the English paper. Keep the question in one variable so the query
    # sent to the chain and the printed echo cannot drift apart.
    question = "M3-Embedding 代表什么?"
    response = chain.invoke({"input": question})

    # Print only the generated answer (the response dict also carries the
    # retrieved context).
    print(f"Question: {question}")
    print("Answer:")
    print(response["answer"])
