from openai import OpenAI
from config.config import OPENAPI_API_KEY_3, OPENAPI_API_BASE_3
from fastapi.responses import StreamingResponse
import asyncio

## Initialize the OpenAI client.
## Credentials and endpoint come from project config (config/config.py);
## base_url allows pointing at a proxy / alternate gateway instead of api.openai.com.
client = OpenAI(
    api_key=OPENAPI_API_KEY_3,
    base_url=OPENAPI_API_BASE_3
)


def streamWeatherReport():
    """Stream a simulated weather report for Tianjin as a plain-text response.

    Asks the model to role-play a weather forecaster (mock data, no real
    weather lookup) and forwards the token stream to the client as it
    arrives.

    Returns:
        StreamingResponse: incremental ``text/plain`` chunks of the
        model's streamed output.
    """
    messages = [
        {
            "role": "system",
            # NOTE: fixed a typo in the prompt ("愈发" -> "语言", i.e. "language").
            "content": "请扮演天气预报播报员，使用生动、形象、简洁的语言介绍的天气（不需要真实数据，模拟即可）"
        },
        {
            "role": "user",
            "content": "天津天气怎么样"
        }
    ]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",
        messages=messages,
        temperature=0.9,  # float: controls output randomness (creative vs. stable)
        # max_tokens=50,  # int: cap on the length of the generated reply
        stream=True  # stream the completion chunk by chunk
    )

    # Plain (sync) generator on purpose: the OpenAI SDK stream is iterated with
    # blocking network reads. Starlette runs a sync iterator in a threadpool,
    # so the event loop is never blocked. (An ``async def`` generator wrapping
    # this sync iteration would block the loop on every ``next()`` call —
    # ``await asyncio.sleep(0)`` between chunks does not fix that.)
    def event_generator():
        for chunk in response:
            delta = chunk.choices[0].delta
            # delta.content is None for role/tool-call deltas; skip those.
            if delta and delta.content:
                yield delta.content

    return StreamingResponse(event_generator(), media_type="text/plain")
