import os

import chromadb
from llama_index.core import (
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    get_response_synthesizer,
)
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore

from llamaIndex.CommonClient import llm
# Load source documents from the local ./data directory.
documents = SimpleDirectoryReader("./data").load_data()

# Persistent Chroma client keeps the vector database on disk under ./chroma_db,
# so embeddings survive between runs.
db = chromadb.PersistentClient(path="./chroma_db")

# Create the collection on first run, or reuse it on later runs.
chroma_collection = db.get_or_create_collection("quickstart")

# Use Chroma as the vector_store backing the storage context.
# pip install llama-index-vector-stores-chroma
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# SECURITY: never hard-code API keys in source control — the previous key was
# committed here and must be rotated. Read credentials from the environment;
# OPENAI_API_BASE falls back to the proxy endpoint this script was using.
openAIEmbeddings = OpenAIEmbedding(
    api_key=os.environ["OPENAI_API_KEY"],
    model="text-embedding-3-small",
    api_base=os.environ.get("OPENAI_API_BASE", "https://www.henapi.top/v1"),
)

# # This variant builds the index from documents AND persists the vectors.
# # Run it once to populate ./chroma_db before using from_vector_store below.
# index = VectorStoreIndex.from_documents(
#     documents, storage_context=storage_context, embed_model=openAIEmbeddings, show_progress=True
# )

# create your index
# Load the index from the previously persisted vector store (no re-embedding
# of documents; assumes the collection was already populated — TODO confirm).
index = VectorStoreIndex.from_vector_store(
    vector_store,
    storage_context=storage_context,
    embed_model=openAIEmbeddings,
    show_progress=True,
)
query_engine = index.as_query_engine(llm=llm)
response = query_engine.query("What was Nike's revenue in 2023?")
print(response)



# --------------------------------------------------------------------------------------------------------
# # The following example builds the index directly from the documents (in
# # memory), rather than loading it from the persisted vector store.
# # build index
# index = VectorStoreIndex.from_documents(documents,embed_model=openAIEmbeddings,show_progress=True)
#
#
# # configure retriever
# retriever = VectorIndexRetriever(
#     index=index,
#     similarity_top_k=10,
# )
#
# # configure response synthesizer
# response_synthesizer = get_response_synthesizer(llm=llm)
#
# # assemble query engine
# query_engine = RetrieverQueryEngine(
#     retriever=retriever,
#     response_synthesizer=response_synthesizer,
#     node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=0.7)],
# )
#
# # query
# response = query_engine.query("What was Nike's revenue in 2023?")
# print(response)

# --------------------------------------------------------------------------------------------------------