from llama_index.core import SimpleDirectoryReader

import os
import sys

# Resolve this script's directory and put its parent on sys.path so the
# sibling `my_common` helper module is importable when this file is run
# directly as a script (rather than from an installed package).
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
print(f"current_dir: {current_dir}")
sys.path.insert(0, parent_dir)


# Local helpers: build the OpenAI-compatible client and the HuggingFace
# embedding model used by the pipeline below.
from my_common import  get_openai_client, get_huggingface_embedding

"""
LlamaIndex is a very popular framework to help build connections between data sources and LLMs. It is also a top choice when people would like to build an RAG framework. In this tutorial, we will go through how to use LlamaIndex to aggregate bge-base-en-v1.5 and GPT-4o-mini to an RAG application.
LlamaIndex 是一个非常流行的框架，可帮助在数据源和 LLM 之间建立联系。当人们想要构建 RAG 框架时，它也是一个首选。在本教程中，我们将介绍如何使用 LlamaIndex 将 bge-base-zh-v1.5 和 Qwen-32B 聚合到 RAG 应用程序。
"""

if __name__ == "__main__":
    """
    1. Data
    First, download BGE-M3 paper to a directory, and load it through SimpleDirectoryReader.

    Note that SimpleDirectoryReader can read all the documents under that directory and supports a lot of commonly used file types.

    首先，将 BGE-M3 论文下载到一个目录下，然后通过 SimpleDirectoryReader 加载它。

    请注意，SimpleDirectoryReader 可以读取该目录下的所有文档，并支持许多常用的文件类型。
    """

    reader = SimpleDirectoryReader(input_files={f"{current_dir}/2402.03216v4.pdf"})
    # reader = SimpleDirectoryReader("DIR_TO_FILE")
    documents = reader.load_data()

    """
    The Settings object is a global settings for the RAG pipeline. Attributes in it have default settings and can be modified by users (OpenAI's GPT and embedding model). Large attributes like models will be only loaded when being used.
    
    Settings 对象是 RAG 管道的全局设置。其中的属性具有默认设置，用户可以修改（OpenAI 的 GPT 和嵌入模型）。像 models 这样的大型属性只有在使用时才会加载。

    Here, we specify the node_parser to SentenceSplitter() with our chosen parameters, use the open-source bge-base-en-v1.5 as our embedding model, and gpt-4o-mini as our llm.

    在这里，我们使用所选参数指定 SentenceSplitter（） 的node_parser，使用开源 bge-base-en-v1.5 作为我们的嵌入模型，并使用 qwen-32b 作为我们的 llm。
    """
    from llama_index.core import Settings
    from llama_index.core.node_parser import SentenceSplitter

    # set the parser with parameters
    Settings.node_parser = SentenceSplitter(
        chunk_size=1000,    # Maximum size of chunks to return
        chunk_overlap=150,  # number of overlap characters between chunks
    )

    # set the specific embedding model
    Settings.embed_model = get_huggingface_embedding()
    openai_client,_ = get_openai_client()
    # set the llm we want to use
    from llama_index.llms.openai import OpenAI
    Settings.llm = OpenAI(openai_client=openai_client)

    """
    2. Indexing
    Indexing is one of the most important part in RAG. LlamaIndex integrates a great amount of vector databases. Here we will use Faiss as an example.
    
    索引是 RAG 中最重要的部分之一。LlamaIndex 集成了大量的向量数据库。这里我们以 Faiss 为例。

    First check the dimension of the embeddings, which will need for initializing a Faiss index.

    首先检查嵌入的维度，初始化 Faiss 索引需要它。
    """
    embedding = Settings.embed_model.get_text_embedding("Hello world")
    dim = len(embedding)
    print("Dimension of the embedding:")
    print(dim)

    """
    Then create the index with Faiss and our documents. Here LlamaIndex help capsulate the Faiss function calls. If you would like to know more about Faiss, refer to the tutorial of Faiss and indexing.
    """

    import faiss
    from llama_index.vector_stores.faiss import FaissVectorStore
    from llama_index.core import StorageContext, VectorStoreIndex

    # init Faiss and create a vector store
    faiss_index = faiss.IndexFlatL2(dim)
    vector_store = FaissVectorStore(faiss_index=faiss_index)

    # customize the storage context using our vector store
    storage_context = StorageContext.from_defaults(
        vector_store=vector_store
    )

    # use the loaded documents to build the index
    index = VectorStoreIndex.from_documents(
        documents, storage_context=storage_context
    )

    """
    3. Retrieve and Generate
    With a well constructed index, we can now build the query engine to accomplish our task:
    检索和生成
    有了构建良好的索引，我们现在可以构建查询引擎来完成我们的任务：
    """
    query_engine = index.as_query_engine()
    # The following cell displays the default prompt template for Q&A in our pipeline:
    # check the default promt template
    # 以下单元格显示管道中 Q&A 的默认提示模板：
    # 检查默认 Promt 模板
    prompt_template = query_engine.get_prompts()['response_synthesizer:text_qa_template']
    print("Default Prompt Template:")
    print(prompt_template.get_template())

    # You could modify the prompt to match your use cases:
    from llama_index.core import PromptTemplate

    template = """
    You are a Q&A chat bot.
    Use the given context only, answer the question.

    <context>
    {context_str}
    </context>

    Question: {query_str}
    """

    new_template = PromptTemplate(template)
    query_engine.update_prompts(
        {"response_synthesizer:text_qa_template": new_template}
    )

    prompt_template = query_engine.get_prompts()['response_synthesizer:text_qa_template']
    print(prompt_template.get_template())

    # Finally, let's see how does the RAG application performs on our query!
    # 最后，让我们看看 RAG 应用程序如何处理我们的查询！
    response = query_engine.query("M3-Embedding 代表什么?")
    print("Response:")
    print(response)
