from llama_index.core.llama_pack import download_llama_pack
from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core import  GPTVectorStoreIndex,VectorStoreIndex
from llama_index.llms import openai_like
from llama_index.core import Settings
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.huggingface import HuggingFaceEmbedding  # HuggingFaceEmbedding:用于将文本转换为词向量
from llama_index.llms.huggingface import HuggingFaceLLM  # HuggingFaceLLM：用于运行Hugging Face的预训练语言模型
from llama_index.core import Settings,SimpleDirectoryReader,VectorStoreIndex
import chromadb
from llama_index.embeddings.dashscope import DashScopeEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.llms.deepseek  import DeepSeek
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from  llama_index.packs.chroma_autoretrieval import ChromaAutoretrievalPack

import os

# --- LLM and embedding configuration --------------------------------------
# SECURITY(review): the DeepSeek API key was previously hard-coded in source
# (and is therefore compromised — rotate it). It is now read from the
# DEEPSEEK_API_KEY environment variable instead.
llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.environ.get("DEEPSEEK_API_KEY", ""),
)
Settings.llm = llm

# Local FastEmbed embedding model; runs on-device, no API key required.
embed_model = FastEmbedEmbedding(model_name="BAAI/bge-small-en-v1.5")
Settings.embed_model = embed_model



# Metadata schema used by the auto-retriever: it tells the LLM what the
# collection contains and which metadata fields can be filtered on, so it
# can translate a natural-language query into structured filters.
_category_field = MetadataInfo(
    name="category",
    type="str",
    description=(
        "Category of the celebrity, one of [Sports Entertainment, Business, Music]"
    ),
)

vector_store_info = VectorStoreInfo(
    content_info="brief biography of celebrities",
    metadata_info=[_category_field],
)

# --- Load documents and build the vector index ------------------------------
# NOTE(review): removed a duplicate `import chromadb` (already imported at the
# top of the file) and an unused `chromadb.EphemeralClient()` — only the
# persistent client below is actually used.
documents = SimpleDirectoryReader('data').load_data()

parser = SimpleNodeParser()

# Persistent Chroma client backed by ./chroma_db; the "docs" collection must
# already exist (get_collection raises if it does not).
chroma_client = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = chroma_client.get_collection("docs")

vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# BUG FIX(review): get_nodes_from_documents() only parses documents into
# nodes; it does not take a `vector_store` argument, so the stray kwarg
# passed here was removed.
nodes = parser.get_nodes_from_documents(documents)

# In-memory index over the parsed nodes (uses Settings.embed_model).
vector_store_index = VectorStoreIndex(nodes)

# --- Create and run the auto-retrieval pack ---------------------------------
# BUG FIX(review): `vector_store_info` was previously passed the
# VectorStoreIndex built above. ChromaAutoretrievalPack expects the
# VectorStoreInfo metadata schema (defined earlier in this file) so the
# auto-retriever can infer metadata filters from the query.
chroma_pack = ChromaAutoretrievalPack(
    collection_name="test",
    vector_store_info=vector_store_info,
    nodes=nodes,
    client=chroma_client,
)

# Typos fixed in the query string ("a bout a ... celebritiy").
response = chroma_pack.run("Tell me about a Music celebrity.")
print(response)