import socket
import json
import threading
import requests

# Base URL of the local Ollama service
OLLAMA_BASE_URL = "http://localhost:11434"

def generate_response(prompt, model="llama2"):
    """Generate a reply for *prompt* via the Ollama /api/generate endpoint.

    Args:
        prompt: User text forwarded verbatim to the model.
        model: Ollama model name to use (default "llama2").

    Returns:
        The model's reply with surrounding whitespace stripped, or a fixed
        apology string if the request or response parsing fails.
    """
    try:
        response = requests.post(
            f"{OLLAMA_BASE_URL}/api/generate",
            json={
                "model": model,
                "prompt": prompt,
                "stream": False,
            },
            # Without a timeout, a stalled Ollama server would hang this
            # thread (and its client connection) forever: 5s to connect,
            # 120s for the model to finish generating.
            timeout=(5, 120),
        )
        response.raise_for_status()
        return response.json()["response"].strip()
    except Exception as e:
        # Broad catch is deliberate: connection errors, timeouts, HTTP
        # errors, bad JSON, and a missing "response" key all degrade to
        # a canned apology instead of killing the client-handler thread.
        print(f"生成回复时出错: {e}")
        return "抱歉，生成回复时出错。"

def handle_client(client_socket):
    """Serve one connected chat client until it disconnects.

    Sends a greeting, then loops: receive a message, obtain a reply via
    generate_response(), and send the reply back.  The socket is always
    closed on exit.

    Args:
        client_socket: A connected TCP socket for a single client.
    """
    print("客户端已连接")
    model = "llama2"  # default Ollama model for this connection

    try:
        # Initial greeting.  sendall() retries until the whole buffer is
        # written; a plain send() may transmit only part of the message.
        initial_message = "服务端已启动。我是基于Ollama的AI助手，有什么可以帮助你的？"
        client_socket.sendall(initial_message.encode())

        while True:
            # Receive raw bytes first so the empty-bytes disconnect signal
            # is checked before decoding.
            # NOTE(review): a single recv() can split a multi-byte UTF-8
            # character across chunks or merge two messages — this assumes
            # each client message arrives whole within 4096 bytes.
            data = client_socket.recv(4096)
            if not data:
                break  # client closed the connection
            client_message = data.decode()

            print(f"客户端: {client_message}")

            # Generate a reply via the LLM.
            server_response = generate_response(client_message, model)
            print(f"服务端回复: {server_response}")

            # Send the full reply back to the client.
            client_socket.sendall(server_response.encode())

    except Exception as e:
        print(f"处理客户端连接时出错: {e}")
    finally:
        client_socket.close()
        print("客户端连接已关闭")

def start_server():
    """Run the TCP chat server on localhost:9999, one thread per client.

    Blocks forever accepting connections; each client is served by
    handle_client() on its own daemon thread.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow immediate restart after a crash/Ctrl-C; without SO_REUSEADDR
    # the port lingers in TIME_WAIT and bind() fails with
    # "Address already in use".
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('localhost', 9999))
    server.listen(1)
    print("服务器已启动，等待客户端连接...")

    try:
        while True:
            client_sock, addr = server.accept()
            # Daemon threads let the process exit promptly on Ctrl-C
            # instead of hanging on still-connected clients.
            client_handler = threading.Thread(
                target=handle_client, args=(client_sock,), daemon=True
            )
            client_handler.start()
    finally:
        # Ensure the listening socket is released even if accept() raises
        # (e.g. KeyboardInterrupt).
        server.close()

if __name__ == "__main__":
    start_server()
