from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    VectorStoreIndex,
    Document,
)
import chromadb
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding

# Embedding model: OpenAIEmbedding (defaults to text-embedding-ada-002).
# NOTE(review): requires OPENAI_API_KEY in the environment.
embed_model = OpenAIEmbedding()

# Reconnect to the Chroma database persisted on disk and fetch the
# collection that was populated in a previous run.
chroma_client = chromadb.PersistentClient(path="./chroma_db")
collection = chroma_client.get_or_create_collection("quickstart")
store = ChromaVectorStore(chroma_collection=collection)

# Rebuild the index directly from the existing vector store — no
# re-embedding of documents is needed, only the query embedding.
index = VectorStoreIndex.from_vector_store(store, embed_model=embed_model)

# Query the persisted index and print the synthesized answer.
engine = index.as_query_engine()
answer = engine.query("SimpleVectorStore是什么?")
print(answer)
