# Advanced RAG --- auto-merging retrieval

import os

from llama_index.readers.web import TrafilaturaWebReader
from llama_index import Document, SimpleDirectoryReader
from llama_index.node_parser import HierarchicalNodeParser
from llama_index.node_parser import get_leaf_nodes
from llama_index import VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index import load_index_from_storage
from llama_index.readers.web import TrafilaturaWebReader
from llama_index.text_splitter import SentenceSplitter
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.embeddings import resolve_embed_model
from llama_index.node_parser import SentenceWindowNodeParser
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.llms import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine

# Read the OpenAI credential from the environment instead of hard-coding it.
# NOTE(security): a literal API key was previously committed on these lines;
# treat that key as leaked and revoke it immediately.
api_key = os.environ.get("OPENAI_API_KEY", "")
if not api_key:
    print("WARNING: OPENAI_API_KEY is not set; OpenAI API calls will fail.")
# Downstream llama_index/OpenAI clients also read this env var directly,
# so keep it set (no-op when the key already came from the environment).
os.environ["OPENAI_API_KEY"] = api_key

# 1. Load the source file.
# SimpleDirectoryReader parses the PDF into a list of Document objects
# (typically one per page); these feed the node parser below.
documents = SimpleDirectoryReader(
    input_files=["./合同履约-合同结算管理操作手册.pdf"]
).load_data()
# NOTE(review): the original code also joined all pages into a single
# merged Document here, but that object was never used anywhere in this
# script, so the dead code was removed.

# 2. Configure the document hierarchy.
# chunk_sizes=[2048, 512, 128] builds a three-level node tree: leaf nodes of
# ~128 tokens, parents of ~512, grandparents of ~2048 (sizes are in tokens,
# not characters). Only the leaves are embedded (see step 3); the
# AutoMergingRetriever in step 4 can then swap groups of retrieved leaves
# for their shared parent.
node_parser = HierarchicalNodeParser.from_defaults(
    chunk_sizes=[2048, 512, 128]
    # Alternative hierarchies to experiment with:
    # chunk_sizes=[4096, 1024, 256]
    # chunk_sizes=[4096, 512, 128]
)
# Split the documents into nodes at every level of the hierarchy.
nodes = node_parser.get_nodes_from_documents(documents)
# Keep only the leaf (smallest) nodes — these are what get embedded.
leaf_nodes = get_leaf_nodes(nodes)
# Inspect the split data:
# print(len(nodes))
# print(leaf_nodes[30])


# 3. Build the vector store index.
# 3.1 LLM used for response synthesis at query time.
llm = OpenAI(api_key=api_key, model="gpt-3.5-turbo", temperature=0.1)
# 3.2 ServiceContext bundling the LLM, embedding model, and node parser.
auto_merging_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=OpenAIEmbedding(),
    node_parser=node_parser,
)
# 3.3 Create the index. ALL nodes (every hierarchy level) are added to the
# docstore so parent nodes can be looked up at retrieval time, but only the
# leaf nodes are embedded into the vector index.
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
automerging_index = VectorStoreIndex(
    leaf_nodes,
    storage_context=storage_context,
    service_context=auto_merging_context
)
# 3.4 Persist the index to disk so it can be reloaded without re-embedding.
automerging_index.storage_context.persist(persist_dir="./merging_index")

# 4. Define the retriever and query engine.
# The base retriever fetches the 12 most similar leaf chunks.
base_retriever = automerging_index.as_retriever(
    similarity_top_k=12
)
# AutoMergingRetriever wraps the base retriever: using the storage context's
# docstore it merges retrieved leaf nodes into their parent node when enough
# sibling leaves are hit. verbose=True logs the merge decisions.
retriever = AutoMergingRetriever(
    base_retriever,
    automerging_index.storage_context,
    verbose=True
)
# Optional cross-encoder reranking step (currently disabled):
# rerank = SentenceTransformerRerank(top_n=6, model="BAAI/bge-reranker-base")
auto_merging_engine = RetrieverQueryEngine.from_args(
    # retriever, node_postprocessors=[rerank]
    retriever, node_postprocessors=[]
)

# 5. Ask a question and print the synthesized answer.
# The query string is kept byte-identical to the original
# (including the doubled question mark).
question = "发票查询应该怎么做？?"
auto_merging_response = auto_merging_engine.query(question)
separator = "============================"
print(separator)
print(auto_merging_response)
print(separator)
