import ollama
import asyncio
import json
from websocket import create_connection, WebSocketTimeoutException
import threading
import time

# WebSocket client: connect as the AI host and dispatch incoming messages.
def websocket_client():
    """Connect to the chat server as user ``AI_HOST`` and serve replies.

    Runs a receive loop forever: each JSON message from the server is handed
    to a worker thread running ``process_message``. On a read timeout the
    connection is re-established. Ctrl-C (KeyboardInterrupt) exits cleanly.
    """
    url = "ws://106.75.148.173:9503/?user_id=AI_HOST"
    # NOTE(review): original used timeout=60000 here but 60 on reconnect;
    # 60 seconds is assumed to be the intended value for both.
    ws = create_connection(url, timeout=60)
    print("上线成功，本机将作为AI服务器回复用户消息！")

    try:
        while True:
            try:
                # Wait for the next message from the server.
                response = json.loads(ws.recv())
                print(f"来自服务器: {str(response)}\n")

                # Handle each request on its own daemon thread so a pending
                # ollama stream cannot block shutdown on Ctrl-C.
                threading.Thread(
                    target=process_message, args=(response, ws), daemon=True
                ).start()
            except WebSocketTimeoutException:
                print("连接超时，尝试重新连接...")
                ws.close()
                ws = create_connection(url, timeout=60)
    except KeyboardInterrupt:
        print("\nExiting...")
    finally:
        # Close the socket. Do NOT recursively restart here: the original
        # re-entered websocket_client() in this finally block, which made
        # KeyboardInterrupt non-terminating and grew the call stack forever.
        ws.close()
        print("Connection closed.")

# Message handler: generate and relay one streamed AI reply.
def process_message(response, ws):
    """Stream an AI-generated reply for one request back over the WebSocket.

    Parameters:
        response: decoded server message; must contain ``'message'`` (the
            user's text) and ``'userid'`` (the reply target) — assumed from
            the send format below, TODO confirm against the server protocol.
        ws: the open WebSocket connection used to send reply chunks.

    Each streamed chunk is forwarded as its own JSON frame; a final frame
    with the literal ``"<end>"`` marker signals completion to the client.
    """
    target = str(response['userid'])  # hoisted: reused for every frame
    # Generate the AI reply as a token stream.
    stream = ollama.chat(
        model='qwen:1.8b',  # model to use
        messages=[
            {"role": "system", "content": "你是智寻AI，由寻安科技开发的一款人工智能。"},
            {'role': 'user', 'content': response['message']}
        ],
        stream=True,  # enable streaming
    )
    print("AI回复：")
    for chunk in stream:
        content = chunk['message']['content']
        # Echo the text locally as it arrives. (The original also did a
        # leftover debug `print(chunk)` of the whole dict here — removed.)
        print(content, end='', flush=True)
        ws.send(json.dumps({
            'userid': "AI_HOST",
            'target': target,
            'message': content
        }))
    # Sentinel frame telling the client the stream is complete.
    ws.send(json.dumps({
        'userid': "AI_HOST",
        'target': target,
        'message': "<end>"
    }))
    print()
    print("发送成功！")

# Run the client when executed as a script.
if __name__ == "__main__":
    websocket_client()