import asyncio
import os
from typing import Optional

import dashscope
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from openai import OpenAI  # NOTE(review): belongs in the top-of-file import block

app = FastAPI()

# Tongyi Qianwen (Qwen) LLM API configuration.
# Full chat-completions URL (kept for backward compatibility with any code
# that reads this constant directly).
QWEN_API_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
# The OpenAI-compatible client expects the API *base* URL and appends
# "/chat/completions" itself; passing QWEN_API_URL as base_url produced a
# doubled ".../chat/completions/chat/completions" request path.
QWEN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
# SECURITY: the previous hard-coded fallback value was a real API key
# committed to source control (a leaked secret). It has been removed;
# configure the QWEN_API_KEY environment variable instead.
QWEN_API_KEY = os.getenv("QWEN_API_KEY", "")

# OpenAI-compatible client for the DashScope endpoint.
client = OpenAI(
    api_key=QWEN_API_KEY,
    base_url=QWEN_BASE_URL,
)

class SemanticRequest(BaseModel):
    """Request body for POST /chat/semantic."""

    # The user's input text forwarded to the LLM.
    text: str
    # Optional conversation identifier. The original `str = None` annotation
    # is not a valid Optional type: under Pydantic v2 an explicit
    # `"session_id": null` in the request would fail validation.
    session_id: Optional[str] = None

class SemanticResponse(BaseModel):
    """Response body for POST /chat/semantic (non-streaming shape)."""

    # The assistant's reply text.
    reply: str
    # Optional conversation identifier echoed back to the caller.
    # `str = None` was not a valid Optional declaration (see SemanticRequest);
    # declare it explicitly so a null value validates.
    session_id: Optional[str] = None

@app.post("/chat/semantic")
async def chat_semantic(req: SemanticRequest):
    """Stream the Qwen model's reply to *req.text* as Server-Sent Events.

    Each incremental chunk of model output is emitted as one SSE
    ``data:`` frame; the response media type is ``text/event-stream``.
    """
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': req.text}
    ]
    # Streaming call with incremental output: each yielded item carries only
    # the new tokens since the previous one. Model list:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    responses = dashscope.Generation.call(
        api_key=QWEN_API_KEY,
        model="qwen-plus",  # example model; change as needed
        messages=messages,
        result_format='message',
        stream=True,
        incremental_output=True,
    )

    async def event_stream():
        """Relay incremental model output as SSE ``data:`` frames."""
        stream_iter = iter(responses)
        done = object()  # sentinel marking iterator exhaustion
        while True:
            # BUGFIX: the dashscope stream is a *synchronous* generator that
            # blocks on network I/O. Iterating it directly inside this async
            # generator froze the event loop for the duration of each read;
            # pull each item in a worker thread instead.
            res = await asyncio.to_thread(next, stream_iter, done)
            if res is done:
                break
            # NOTE(review): error responses from dashscope may arrive with an
            # empty `output`; guard before dereferencing. Non-OK status codes
            # are silently skipped here — confirm desired error handling.
            if res.output and res.output.choices:
                yield f"data: {res.output.choices[0].message.content}\n\n"

    return StreamingResponse(event_stream(), media_type="text/event-stream")
