import json
import os

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.vector_stores.chroma import ChromaVectorStore
from pydantic.v1 import BaseModel

if __name__ == "__main__":
    def show_json(data):
        """Pretty-print *data* as indented JSON and return the rendered text.

        Accepts a JSON-encoded string, a dict/list, or a pydantic (v1)
        BaseModel instance.  Returns the formatted JSON string, or None for
        any other type (silently ignored, as in the original behavior).
        """
        # ensure_ascii=False on every branch so non-ASCII (e.g. Chinese)
        # text prints readably instead of as \uXXXX escapes.
        if isinstance(data, str):
            # Treat the string as serialized JSON and re-render it indented.
            rendered = json.dumps(json.loads(data), indent=4, ensure_ascii=False)
        elif isinstance(data, (dict, list)):
            rendered = json.dumps(data, indent=4, ensure_ascii=False)
        elif isinstance(data, BaseModel):
            # pydantic v1 models expose .dict() for plain-dict conversion.
            rendered = json.dumps(data.dict(), indent=4, ensure_ascii=False)
        else:
            return None
        print(rendered)
        return rendered


    def show_list_obj(data):
        """Pretty-print every element of a list via show_json.

        Raises ValueError when *data* is not a list.
        """
        if not isinstance(data, list):
            raise ValueError("Input is not a list")
        for element in data:
            show_json(element)

    # Route OpenAI traffic through the custom (Zhihu) endpoint.  Both
    # OPENAI_API_BASE (older SDKs) and OPENAI_BASE_URL (openai>=1.x) are
    # set so either client version picks up the proxy.  A missing
    # *_ZHIHU variable raises KeyError immediately, which is the desired
    # fail-fast behavior for this demo.
    os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY_ZHIHU"]
    os.environ["OPENAI_API_BASE"] = os.environ["OPENAI_API_BASE_ZHIHU"]
    os.environ["OPENAI_BASE_URL"] = os.environ["OPENAI_API_BASE_ZHIHU"]

    import chromadb
    from chromadb.config import Settings

    # Create a Chroma client.  EphemeralClient keeps everything in memory;
    # switch to PersistentClient if the vectors should survive the process.
    # allow_reset=True is required for the reset() call below.
    chroma_client = chromadb.EphemeralClient(settings=Settings(allow_reset=True))

    # Load every PDF under ./llamaIndexDatas (recursively).
    documents = SimpleDirectoryReader(
        "./llamaIndexDatas", recursive=True, required_exts=[".pdf"]
    ).load_data()

    # Split the documents into overlapping token chunks for embedding.
    node_parser = TokenTextSplitter(chunk_size=300, chunk_overlap=100)
    nodes = node_parser.get_nodes_from_documents(documents)

    # Start from a clean database, then create the target collection.
    chroma_client.reset()
    chroma_collection = chroma_client.create_collection("custom_vector_db_demo1")

    # Wrap the Chroma collection as a llama-index vector store.
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

    # StorageContext is the storage container (text, index, vectors) that
    # ties the index to our custom vector store.
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    # Build the index directly from the pre-split nodes via the storage
    # context, so embeddings land in the Chroma collection.
    index = VectorStoreIndex(nodes, storage_context=storage_context)

    # Retrieve the top-2 most similar chunks for the query.
    vector_retriever = index.as_retriever(similarity_top_k=2)
    results = vector_retriever.retrieve("Llama2有多少参数")

    show_list_obj(results)