from flask import Flask, render_template, request, jsonify
from flask_socketio import SocketIO, emit
from openai import OpenAI
from dotenv import load_dotenv
import os

# Load environment variables from a local .env file (e.g. OPENAI_API_KEY)
load_dotenv()

app = Flask(__name__)
# NOTE(review): cors_allowed_origins="*" accepts WebSocket connections from
# any origin — fine for local development; tighten before production.
socketio = SocketIO(app, cors_allowed_origins="*")

# Initialize the OpenAI client (reads OPENAI_API_KEY from the environment)
client = OpenAI()

# System prompt prepended to every request sent to the model
system_message = {
    'role': 'system',
    'content': '我是你的助手，我的名字叫 Melon，我能够帮助您解决各种各样的问题!'
}


@app.route('/')
def index():
    """Serve the single-page chat client."""
    page = render_template('index.html')
    return page


@socketio.on('user_message')
def handle_user_message(data):
    """Handle an incoming 'user_message' Socket.IO event.

    Streams the model's reply back to the sender as a series of
    'assistant_stream' events (one per token chunk), then emits a final
    'assistant_complete' event containing the full reply. On an API
    failure an 'assistant_error' event is emitted instead of letting the
    exception escape the handler.

    Args:
        data: Event payload from the client; expected to be a dict with
            a 'message' key. Missing/empty messages are ignored.
    """
    # Guard against a None or non-dict payload as well as an empty message.
    user_message = (data or {}).get('message', '')
    if not user_message:
        return

    # Stateless: each request sends only the system prompt plus the new
    # user message — no conversation history is kept server-side.
    messages = [system_message, {'role': 'user', 'content': user_message}]

    full_response = ""
    try:
        # Stream the completion so the client sees tokens as they arrive.
        response_stream = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            stream=True,
        )

        # Process and forward each response fragment as it arrives.
        for chunk in response_stream:
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                full_response += content
                # Send the new fragment along with the accumulated text.
                emit('assistant_stream', {
                    'chunk': content,
                    'full': full_response
                })
    except Exception:
        # Surface the failure to the client instead of crashing the
        # handler with an unhandled exception (which gave no feedback).
        app.logger.exception('OpenAI streaming request failed')
        emit('assistant_error', {'message': 'Sorry, something went wrong. Please try again.'})
        return

    # Debug trace of the complete reply (was a bare print).
    app.logger.info('full_response: %s', full_response)

    # Signal that the stream is complete.
    emit('assistant_complete', {'message': full_response})


if __name__ == '__main__':
    # Start the Socket.IO development server. allow_unsafe_werkzeug=True
    # lets Flask-SocketIO run on the Werkzeug dev server — development only,
    # not suitable for production.
    socketio.run(app, allow_unsafe_werkzeug=True)
