from llm import qwen2_5_7B_Instruct
from embeddings import embed_model_local_bge_small
from llama_index.core import Settings, StorageContext, SimpleDirectoryReader,load_index_from_storage
from llama_index.core.callbacks import LlamaDebugHandler, CallbackManager

Settings.llm = qwen2_5_7B_Instruct()
# Use LlamaDebugHandler to build an event tracer so we can follow the events
# fired during LlamaIndex execution (prints a trace summary when a run ends).
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
Settings.callback_manager = CallbackManager([llama_debug])

# Load the embedding model (must match the model used to build doc_emb).
Settings.embed_model = embed_model_local_bge_small()

# Restore the embedding vectors and vector index persisted under doc_emb.
storage_context = StorageContext.from_defaults(persist_dir="doc_emb")
index = load_index_from_storage(storage_context)
# Build the query engine over the restored index.
query_engine = index.as_query_engine(similarity_top_k=5)
# Run the query and print the synthesized answer.
response = query_engine.query("不耐疲劳，口燥、咽干可能是哪些证候？")
print(response)
# get_llm_inputs_outputs returns (start, end) event pairs for each LLM call.
event_pairs = llama_debug.get_llm_inputs_outputs()
if not event_pairs:
    print("No LLM events were recorded by the debug handler.")
else:
    # Payload keys come from EventPayload (a str enum): completion-style LLM
    # calls carry "formatted_prompt", chat-style calls carry "messages".
    # payload itself can be None on some events, so default to an empty dict.
    end_payload = event_pairs[0][1].payload or {}
    prompt = end_payload.get("formatted_prompt") or end_payload.get("messages")
    if prompt is not None:
        print(prompt)
    else:
        # Neither key present — show what is available instead of raising KeyError.
        print(f"No prompt found in payload; available keys: {list(end_payload)}")