from fastapi import FastAPI, HTTPException, Header, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
import asyncio
import json
import time
from typing import Optional, Dict, Any
import uvicorn

app = FastAPI()
# Register CORS middleware so browser clients from any origin can call this mock API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins; restrict to specific origins in production
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all request headers
)
# Mock API-key store used to validate incoming requests.
VALID_API_KEYS = {"sk-test123"}

# Model name reported in every response payload.
model_name = 'workflow1119'
async def verify_api_key(api_key: str = Header(...)):
    """Validate *api_key* against the known key set and return it.

    Raises:
        HTTPException: 401 when the key is not recognized.
    """
    if api_key in VALID_API_KEYS:
        return api_key
    raise HTTPException(status_code=401, detail="Invalid API key")


# Async generator that simulates streamed text in OpenAI SSE format.
async def fake_text_generator(prompt: str):
    """Yield an OpenAI-style SSE chat-completion stream for *prompt*.

    Emits a role chunk, then one content chunk per character of a canned
    reply (with a short delay to simulate generation), then a final chunk
    with ``finish_reason="stop"`` followed by the ``data: [DONE]`` marker.

    Note: *prompt* is accepted for interface compatibility but the reply
    text is fixed — this is a mock.
    """
    created = int(time.time())  # single timestamp shared by all chunks

    def _sse_chunk(delta: dict, finish_reason=None) -> str:
        # Build a fresh chunk per event. The previous implementation
        # shallow-copied one template dict, so every chunk aliased the same
        # "choices" list and the final chunk leaked the last content delta.
        chunk = {
            "id": "chatcmpl-123",
            "object": "chat.completion.chunk",
            "created": created,
            "model": model_name,
            "choices": [{
                "index": 0,
                "delta": delta,
                "finish_reason": finish_reason,
            }],
        }
        return f"data: {json.dumps(chunk)}\n\n"

    # Announce the assistant role first, as the OpenAI streaming API does.
    yield _sse_chunk({"role": "assistant"})

    # Stream the canned reply one character at a time.
    text = "这是一个模拟的回复，用于测试流式输出。"
    for char in text:
        yield _sse_chunk({"content": char})
        await asyncio.sleep(0.1)  # simulate generation latency

    # Final chunk: empty delta plus the stop marker, then the SSE terminator.
    yield _sse_chunk({}, finish_reason="stop") + "data: [DONE]\n\n"


@app.post("/v1/chat/completions")
async def chat_completions(
        request: Request,
        api_key: str = Header(..., alias="Authorization")
):
    """Mock OpenAI chat-completions endpoint.

    Validates the Authorization header, reads the JSON body, and returns
    either a streamed SSE response or a complete canned completion
    depending on the ``stream`` flag.

    Raises:
        HTTPException: 401 for an invalid key, 400 for a malformed body
            or a missing ``messages`` list.
    """
    # Strip only the "Bearer " scheme prefix. (str.replace would delete the
    # substring anywhere inside the key, corrupting keys that contain it.)
    if api_key.startswith("Bearer "):
        api_key = api_key[len("Bearer "):]
    await verify_api_key(api_key)

    # Parse the request body; reject malformed JSON with a 400 instead of
    # letting the decode error bubble up as a 500.
    try:
        body = await request.json()
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Request body must be valid JSON")

    messages = body.get("messages", [])
    stream = body.get("stream", False)

    if not messages:
        raise HTTPException(status_code=400, detail="No messages provided")

    # Use the last message's content as the prompt (the mock generator
    # ignores it, but this mirrors a real server). Guard non-dict entries.
    last = messages[-1]
    prompt = last.get("content", "") if isinstance(last, dict) else ""

    if stream:
        # Streamed SSE response, chunked by the generator.
        return StreamingResponse(
            fake_text_generator(prompt),
            media_type="text/event-stream"
        )

    # Non-streaming: one complete canned completion.
    return {
        "id": "chatcmpl-123",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": model_name,
        "usage": {
            "prompt_tokens": 10,
            "completion_tokens": 20,
            "total_tokens": 30
        },
        "choices": [{
            "message": {
                "role": "assistant",
                "content": "这是一个模拟的回复。"
            },
            "finish_reason": "stop",
            "index": 0
        }]
    }


if __name__ == "__main__":
    # Launch the mock server when this file is executed directly.
    host, port = "0.0.0.0", 8000
    uvicorn.run(app, host=host, port=port)

