from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, StorageContext, load_index_from_storage
from llama_index.core.chat_engine.types import ChatMode
from llama_index.core.memory import ChatMemoryBuffer
from llms import deepseek_llm
from embeddings import embed_model_local_bge_small
# Global LlamaIndex defaults: DeepSeek as the LLM and a local BGE-small model
# for embeddings. Every index build and chat engine below picks these up.
Settings.llm = deepseek_llm()
Settings.embed_model = embed_model_local_bge_small()

def index_data(input_dir: str = "./data", persist_dir: str = "./index") -> None:
    """Build a vector index from local documents and persist it to disk.

    Intended to run once (or whenever the source documents change); chat
    engines later reload the persisted index instead of re-embedding.

    :param input_dir: directory containing the documents to index.
    :param persist_dir: directory where the index/vector store is saved.
    """
    # Load raw documents via the directory data connector.
    documents = SimpleDirectoryReader(input_dir=input_dir).load_data()
    # Embed the documents and build the vector index (progress bar on console).
    index = VectorStoreIndex.from_documents(documents, show_progress=True)
    # Persist the storage context so the index can be reloaded later.
    index.storage_context.persist(persist_dir=persist_dir)
async def create_chat_engine_rag(
    persist_dir: str = "./index", token_limit: int = 1024
):
    """Load the persisted vector index and build a context-mode chat engine.

    NOTE(review): kept ``async`` for caller compatibility (callers presumably
    ``await`` it) even though the body currently contains no awaits.

    :param persist_dir: directory the index was persisted to by ``index_data``.
    :param token_limit: max tokens of chat history kept in the memory buffer.
    :return: a llama_index chat engine configured for RAG-style answering.
    """
    # Rehydrate the storage context and index previously persisted to disk.
    storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
    index = load_index_from_storage(storage_context)
    # Bounded buffer that stores the running conversation history.
    memory = ChatMemoryBuffer.from_defaults(token_limit=token_limit)
    # CONTEXT mode retrieves relevant chunks and injects them into the prompt.
    chat_engine = index.as_chat_engine(
        chat_mode=ChatMode.CONTEXT,
        memory=memory,
        system_prompt=(
            "你是一个AI助手，可以基于用户提供的上下文内容，回答用户的问题。不能肆意编造回答。"
        ),
    )
    return chat_engine
if __name__ == "__main__":
    index_data()