import chromadb
from llama_index.core import Settings, StorageContext, VectorStoreIndex, get_response_synthesizer
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.indices.vector_store import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.vector_stores.chroma import ChromaVectorStore

from config.embeddings import embed_model_local_bge_small
from config.llm import deepseek_llm
from tool.file_tool import getRootPath

root_path = getRootPath()

# Configure the process-global LLM used for response synthesis.
Settings.llm = deepseek_llm()

# Fail fast if the embedding factory did not return a BaseEmbedding instance,
# since LlamaIndex requires one for Settings.embed_model.
embed_model_instance = embed_model_local_bge_small()
if not isinstance(embed_model_instance, BaseEmbedding):
    raise TypeError("Embedding model is not an instance of BaseEmbedding")

Settings.embed_model = embed_model_instance

# Initialize a persistent (on-disk) Chroma client rooted at the project path.
chroma_db = chromadb.PersistentClient(path=root_path + '/chroma_db')

# Get (or create on first run) the target collection.
chroma_collection = chroma_db.get_or_create_collection("chroma-examples")

# Expose the Chroma collection to LlamaIndex as the vector store backing
# the storage context.
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Load the index directly from the existing vector store (no re-ingestion).
chroma_index = VectorStoreIndex.from_vector_store(vector_store, storage_context=storage_context)

# Retriever: return the 5 most similar nodes per query.
chroma_retriever = VectorIndexRetriever(index=chroma_index, similarity_top_k=5)

# Response synthesizer: turns retrieved nodes + query into a final answer.
response_synthesizer = get_response_synthesizer()

# Assemble retriever + synthesizer into a query engine.
query_engine = RetrieverQueryEngine(
    retriever=chroma_retriever,
    response_synthesizer=response_synthesizer,
)

response = query_engine.query("退订费用是多少？")
print(response)