import os

import gradio as gr
import openai

# Configure a client against Alibaba Cloud DashScope's OpenAI-compatible endpoint.
# SECURITY: never hard-code API keys in source. Prefer the DASHSCOPE_API_KEY
# environment variable; the inline literal remains only as a fallback so existing
# deployments keep working — rotate and remove it.
client = openai.OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY", "sk-ab53207bdefa4973a739ec64ea5b9d99"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
)


# Streaming generator used as the `fn` of gr.ChatInterface.
def stream_qwen3(prompt, history=None):
    """Stream a chat completion from Qwen3 over the OpenAI-compatible API.

    Intended as the ``fn`` for :class:`gr.ChatInterface`, which invokes it as
    ``fn(message, history)`` — the original single-argument signature raised a
    TypeError when used with ChatInterface.

    Args:
        prompt: The user's latest message text.
        history: Prior conversation turns supplied by ChatInterface. Accepted
            but not forwarded to the model here; optional so the function can
            still be called with a lone prompt.

    Yields:
        The accumulated response text so far. ChatInterface *replaces* the
        displayed message with each yielded value, so the running total must
        be yielded, not the individual token deltas.
    """
    response = client.chat.completions.create(
        model="qwen3",  # model name on DashScope
        messages=[{'role': 'system', 'content': 'You are a helpful assistant.'},
                  {'role': 'user', 'content': prompt}],
        stream=True,  # enable streaming output
        stream_options={"include_usage": True}
    )

    partial = ""
    for chunk in response:
        # With include_usage=True the final chunk carries usage stats and an
        # EMPTY `choices` list — indexing it unconditionally raises IndexError.
        if chunk.choices and chunk.choices[0].delta.content is not None:
            partial += chunk.choices[0].delta.content
            yield partial


# Build the Gradio chat UI. ChatInterface handles the message/history plumbing
# and streams each value yielded by the generator into the chatbot panel.
interface = gr.ChatInterface(
    stream_qwen3,
    chatbot=gr.Chatbot(height=400),
    textbox=gr.Textbox(placeholder="输入你的问题", container=False, scale=7),
    title="Qwen3 流式对话 (OpenAI 接口)",
    description="使用 OpenAI 兼容接口调用 Qwen3 模型，支持流式输出。",
    theme="soft",
    examples=["你好", "请介绍一下你自己", "如何用Python写一个爬虫"],
)

# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    interface.launch()