from pathlib import Path

from llama_index.core import (
    Settings,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.core.chat_engine.types import ChatMode
from llama_index.core.memory import ChatMemoryBuffer
from openai import chat

from llms import deepseek_llm
from embeddings import embed_model_local_bge_small
# Configure the global LLM and embedding model used by llama_index.
Settings.llm = deepseek_llm()
Settings.embed_model = embed_model_local_bge_small()

# Directory where the vector index is persisted between runs.
PERSIST_DIR = "index"

if Path(PERSIST_DIR).exists():
    # Fast path: reuse the persisted index. Previously the script
    # re-read and re-embedded every document in "data/" on each run
    # even though the loaded documents were never used.
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
else:
    # First run: ingest the documents (data connector), build the
    # vector index, and persist it so later runs can skip ingestion.
    documents = SimpleDirectoryReader(input_dir="data").load_data()
    index = VectorStoreIndex.from_documents(documents, show_progress=True)
    index.storage_context.persist(persist_dir=PERSIST_DIR)


# Ad-hoc query-engine sanity check, kept for reference:
# q = index.as_query_engine()
# # res = q.query('本周转债行情表现')
# res = q.query('本周转债条款信息')
# print(res)


# Conversational interface over the index. CONTEXT mode retrieves the
# most relevant chunks from the vector store for every user turn and
# injects them into the prompt alongside the system instructions.
bond_market_prompt = """
    You are a helpful assistant that answers questions about the bond market.
    """

chat_engine = index.as_chat_engine(
    chat_mode=ChatMode.CONTEXT,
    system_prompt=bond_market_prompt,
)

# Interactive streaming chat loop on stdin/stdout; blocks until exit.
chat_engine.streaming_chat_repl()




