from openai import OpenAI
from IPython.display import display, clear_output
from ollama import chat
from ollama import ChatResponse

# OpenAI-compatible client pointed at a local Ollama server.
client = OpenAI(
    # base_url='http://121.43.177.175:11434/v1/',
    # base_url='http://192.168.0.103:11434/v1/',
    base_url='http://localhost:11434/v1/',
    api_key='llama3.2:latest',  # a local Ollama server ignores the key; any placeholder works
)
# User utterance fed into the intent-classification prompt below.
content = "帮我预定下明天下午3点-5点的会议"
# content = "明天下午3点-5点的1001会议室有安排吗"
# content = "明天的会议咋安排的？"


# response: ChatResponse = chat(model='qwen2', messages=[
#     {
#         'role': 'user',
#         'content': f'''
#         {content}
# ''',
#     },
# ])
# print(response['message']['content'])
# # or access fields directly from the response object
# print(response.message.content)

# Ask the model to classify the user's intent (meeting booking) and to
# request any missing details (time, room); stream the reply token by token.
completion = client.chat.completions.create(
    messages=[
        {
            'role': 'user',
            'content': f'''请从下面的内容中理解用户的意图是否属于 预定会议 ，如果是，则分析是否包含预定时间，会议室且缺一不可，否则继续追问缺少的信息，请尽量简单的回复
            {content}
            ''',
        }
    ],
    model='deepseek-r1:8b',
    # model='llama3.2',
    stream=True  # enable streaming output
)

# Accumulate streamed deltas in a list ("".join is linear; repeated += is
# quadratic). The final stop chunk carries delta.content == None, which the
# original `response += ...` crashed on with a TypeError — skip empty deltas.
parts = []
for chunk in completion:
    delta = chunk.choices[0].delta.content
    if delta:
        parts.append(delta)
        print("".join(parts))
        clear_output(wait=True)  # redraw in place in Jupyter; may clip the last frame

# Print once more after the stream ends so the complete text is always shown
# (the in-loop clear_output can leave the final frame truncated).
response = "".join(parts)
print(response)