from llm import qwen2_5_7B_Instruct
from embeddings import embed_model_local_bge_small
from llama_index.core import Settings, StorageContext, SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.callbacks import LlamaDebugHandler, CallbackManager

# Configure the global LLM that LlamaIndex components will use.
Settings.llm = qwen2_5_7B_Instruct()

# Attach a LlamaDebugHandler via a CallbackManager so every event fired
# during LlamaIndex execution is traced and printed when a run finishes.
debug_handler = LlamaDebugHandler(print_trace_on_end=True)
Settings.callback_manager = CallbackManager([debug_handler])

# Register the local BGE-small model as the global embedding model.
Settings.embed_model = embed_model_local_bge_small()

# Read every file under ./documents into Document objects.
docs = SimpleDirectoryReader('./documents').load_data()

# Embed the documents and build an in-memory vector index from them.
index = VectorStoreIndex.from_documents(docs)

# Write the index (vectors + metadata) to disk for later reuse.
index.storage_context.persist(persist_dir='./doc_emb')
print("向量持久化到本地")