# -*- coding: utf-8 -*-
from query_engine import set_vector_store, get_query_engine_for_session
from init_config import Config
from init_db import load_vector_store
from init_model import init_models

def main():
    """Run an interactive CLI for the labor-law question-answering assistant.

    Initializes the embedding model / LLM and the vector store, builds a
    session-scoped query engine, then loops reading questions from stdin
    until the user enters 'q' (or sends EOF / Ctrl-C).
    """
    # Initialize models; init_models presumably registers them globally
    # (the return values are not used directly here) — confirm against init_model.
    embed_model, llm = init_models(Config.REMOTE_LLM_API)
    index = load_vector_store()
    set_vector_store(index)

    # Session-scoped engine; in a real service the id would come from the request.
    session_id = "user123"
    query_engine = get_query_engine_for_session(session_id)

    while True:
        try:
            question = input("\n请输入劳动法相关问题（输入q退出）: ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of dumping a traceback.
            break
        # strip() so trailing whitespace (e.g. "q ") still exits.
        if question.strip().lower() == 'q':
            break

        # Execute the query against the session's engine.
        response = query_engine.query(question)
        print(f"\n智能助手回答：\n{response.response}")
        print("\n支持依据：")
        for idx, node in enumerate(response.source_nodes, 1):
            meta = node.metadata
            # Metadata keys may be missing depending on how the corpus was
            # ingested — use .get() so one bad node can't crash the loop.
            print(f"\n[{idx}] {meta.get('full_title', '未知')}")
            print(f"  来源文件：{meta.get('source_file', '未知')}")
            print(f"  法律名称：{meta.get('law_name', '未知')}")
            print(f"  条款内容：{node.text[:100]}...")
            # node.score can be None for some retrievers; the ':.4f' format
            # spec would raise TypeError on None, so only print when present.
            if node.score is not None:
                print(f"  相关度得分：{node.score:.4f}")

# Standard script guard: run the interactive CLI only when executed
# directly, not when this module is imported.
if __name__ == "__main__":
    main()