# Global parameter settings for LlamaIndex
import asyncio

from llama_index.core import Settings
from llama_index.core.chat_engine import SimpleChatEngine


from llms import deepseek_llm,gangtise_llm

# Import syntax reminder: from <parent package> import <child module>

# Settings.llm = deepseek_llm()
Settings.llm = gangtise_llm()
chat_engine = SimpleChatEngine.from_defaults()
# res = chat_engine.stream_chat(message="介绍一下性能测试")
# for token in res.response_gen:
#     print(token)
    

chat_engine.streaming_chat_repl()
# Coroutine demo (kept for reference, not executed)
# import asyncio
#
# def say_hello():
#     print("Hello")
#     # await asyncio.sleep(1)  # simulate a time-consuming operation
#     print("World")
#
# async def main():
#     say_hello()  # NOTE: plain function call — say_hello is not a coroutine and is not awaited
#
# # Run the event loop
# asyncio.run(main())