from flask import Flask, Response, stream_with_context, render_template
import openai
import time

from openai import OpenAI

# Flask application instance.
app = Flask(__name__)

# Connection settings for an OpenAI-compatible server (the base URL points
# at a LAN host serving the /v1 API, e.g. vLLM). "EMPTY" is the
# conventional placeholder API key for local servers that do not enforce
# authentication.
openai_api_key = "EMPTY"
openai_api_base = "http://192.168.50.109:8000/v1"


# Home page route.
@app.route('/')
def index():
    """Serve the home page by rendering templates/index.html."""
    return render_template('index.html')


def large_model_response(prompt="轻写一个"):
    """Stream a chat completion as Server-Sent Events (SSE).

    Opens a streaming chat completion against the OpenAI-compatible
    endpoint configured at module level and yields each content delta as
    an SSE ``data:`` event, terminated by a ``data: [DONE]`` sentinel.

    Args:
        prompt: The user message sent to the model. Defaults to the
            original hard-coded prompt for backward compatibility.

    Yields:
        str: SSE-formatted frames, each ending with a blank line.
    """
    client = OpenAI(
        api_key=openai_api_key,
        base_url=openai_api_base,
    )

    chat_response = client.chat.completions.create(
        model="Qwen2.5-1.5B-Instruct",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
        stream=True,  # enable token-by-token streaming
    )
    for chunk in chat_response:
        # Some stream chunks carry no choices (e.g. usage-only chunks),
        # and the final chunk's delta has content=None. The original
        # `hasattr(delta, "content")` test is always true on the v1 SDK's
        # delta objects, which caused a literal "data: None" frame.
        if not chunk.choices:
            continue
        content = chunk.choices[0].delta.content
        if content:
            # SSE payloads must not contain raw newlines: encode a
            # multi-line delta as consecutive "data:" lines in one event.
            event = ''.join(
                'data: {}\n'.format(line) for line in content.split('\n')
            )
            yield event + '\n'
    yield 'data: [DONE]\n\n'


@app.route('/stream')
def stream():
    """SSE endpoint: relay the model's streamed output to the client."""
    print("========开始执行流输出========")
    # stream_with_context keeps the request context alive for the whole
    # lifetime of the generator while Flask streams it out.
    event_source = stream_with_context(large_model_response())
    return Response(event_source, content_type='text/event-stream')


if __name__ == '__main__':
    # Development server entry point.
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug interactive debugger to the whole network — confirm this
    # is never used outside a trusted LAN / development setting.
    app.run(debug=True, host='0.0.0.0')
