import chromadb
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex, Settings
from llama_index.core.node_parser import SentenceSplitter
from llama_index.vector_stores.chroma import ChromaVectorStore

from config.embeddings import embed_model_local_bge_small
from config.llm import deepseek_llm
from tool.file_tool import getRootPath

root_path = getRootPath()

# Since llama_index v0.10.0 the global LLM and embedding model are
# configured through the Settings singleton instead of a ServiceContext.
Settings.llm = deepseek_llm()
Settings.embed_model = embed_model_local_bge_small()

# chunk_size: max characters per document chunk;
# chunk_overlap: characters shared between consecutive chunks.
Settings.node_parser = SentenceSplitter(chunk_size=500, chunk_overlap=100)

# Load every document found under the project's data directory.
documents = SimpleDirectoryReader(root_path + '/data').load_data()

# --- Custom vector store ---
# Initialise a Chroma client that persists its data to disk under the
# project root, so embeddings survive across runs.
chroma_db = chromadb.PersistentClient(path=root_path + '/chroma_db')

# Fetch the collection, creating it on first run.
# NOTE: renamed from "chroma_correction" — this object is a Chroma *collection*.
chroma_collection = chroma_db.get_or_create_collection("airline-service")

# Expose the Chroma collection to llama_index as its vector store and
# wrap it in a StorageContext so the index writes vectors into Chroma.
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Build the index: documents are split by Settings.node_parser, embedded
# with Settings.embed_model, and the vectors are stored in Chroma.
# NOTE(review): this re-embeds all documents on every run even though the
# client is persistent — consider loading from the existing collection
# when it is non-empty; verify against intended usage.
vector_index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

# Query with the tree_summarize response mode: retrieved chunks are
# summarised bottom-up into a single consolidated answer.
tree_query_engine = vector_index.as_query_engine(response_mode="tree_summarize")
tree_response = tree_query_engine.query("退订费用是多少？")
print("tree_query_engine query response:", tree_response)

# Ask the same question again, this time streaming tokens as they arrive.
stream_query_engine = vector_index.as_query_engine(streaming=True)
stream_response = stream_query_engine.query("退订费用是多少？")
print("stream_query_engine query response:")
stream_response.print_response_stream()