import json
import os
import time

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.core.extractors import TitleExtractor
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import TokenTextSplitter, SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import PyMuPDFReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from pydantic.v1 import BaseModel


if __name__ == "__main__":
    class Timer:
        """Context manager that measures the wall-clock time of its body.

        On exit it stores the elapsed seconds in ``self.interval`` and
        prints the duration in milliseconds.
        """

        def __enter__(self):
            # perf_counter is monotonic and higher-resolution than
            # time.time(), so the measurement cannot go negative or jump
            # when the system clock is adjusted mid-run.
            self.start = time.perf_counter()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.end = time.perf_counter()
            self.interval = self.end - self.start
            print(f"耗时 {self.interval * 1000} ms")

    # Route the OpenAI SDK at the Zhihu proxy credentials/endpoint.
    # Both base-URL variable spellings are set because different client
    # versions read different names.
    os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY_ZHIHU"]
    for base_var in ("OPENAI_API_BASE", "OPENAI_BASE_URL"):
        os.environ[base_var] = os.environ["OPENAI_API_BASE_ZHIHU"]

    import chromadb
    from chromadb.config import Settings

    # In-memory Chroma client (EphemeralClient); switch to PersistentClient
    # if the vectors should survive the process. allow_reset=True is needed
    # for the reset() call below.
    chroma_client = chromadb.EphemeralClient(settings=Settings(allow_reset=True))

    # Recursively load every PDF under ./llamaIndexDatas via PyMuPDF.
    documents = SimpleDirectoryReader(
        "./llamaIndexDatas",
        recursive=True,
        required_exts=[".pdf"],
        file_extractor={".pdf": PyMuPDFReader()},
    ).load_data()

    # Start from a clean slate, then create the collection backing the
    # vector store.
    chroma_client.reset()
    chroma_collection = chroma_client.create_collection("custom_vector_db_demo2")
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

    # Ingestion pipeline: sentence-split -> LLM title extraction -> embed,
    # writing the resulting nodes directly into the Chroma vector store.
    pipeline = IngestionPipeline(
        transformations=[
            SentenceSplitter(chunk_size=300, chunk_overlap=100),  # sentence-level chunking
            TitleExtractor(),   # LLM-generated titles for each document
            OpenAIEmbedding(),  # text -> embedding vectors
        ],
        vector_store=vector_store,
    )

    # Warm-start from the transformation cache of a previous run. A missing
    # cache directory is expected on the first run, so only that case is
    # swallowed; any other failure should surface instead of being hidden
    # by a bare `except`.
    try:
        pipeline.load('./pipeline_storage')
    except FileNotFoundError:
        pass

    with Timer():
        nodes = pipeline.run(documents=documents)

    # Build the query index directly on top of the populated vector store.
    index = VectorStoreIndex.from_vector_store(vector_store)

    # Persist the pipeline cache so subsequent runs skip unchanged documents.
    pipeline.persist('./pipeline_storage')

    # Use ONE query string for both retrieval and reranking. The original
    # code passed two subtly different strings (full-width "？" vs ASCII
    # "?"), so the reranker scored a different question than the retriever
    # answered.
    query = "Llama2 能商用吗？"

    # Top-5 vector retrieval.
    vector_retriever = index.as_retriever(similarity_top_k=5)
    results = vector_retriever.retrieve(query)

    from llama_index.core.postprocessor import SentenceTransformerRerank

    # Cross-encoder reranker (local bge-reranker-large checkpoint):
    # rescores the 5 retrieved nodes and keeps the best 2.
    postprocessor = SentenceTransformerRerank(
        model="C:/Users/adind/.cache/modelscope/hub/quietnight/bge-reranker-large", top_n=2
    )

    results = postprocessor.postprocess_nodes(results, query_str=query)

    for i, result in enumerate(results):
        print(f"[{i}] {result.text}")