# server.py
import asyncio

import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

from simple_vllm import SimpleLLMEngine

# Application-wide singletons, created once at import time.
app = FastAPI()
# NOTE(review): constructing the engine here loads the "gpt2" model during
# import, so server startup blocks until the weights are ready — presumably
# acceptable for a demo server; confirm for production use.
engine = SimpleLLMEngine("gpt2")


class GenerateRequest(BaseModel):
    """Request body for ``POST /generate``."""

    # Input text the model should continue from.
    prompt: str
    # Upper bound on the number of tokens to generate; forwarded verbatim
    # to engine.add_request, which presumably enforces it.
    max_tokens: int = 100


class GenerateResponse(BaseModel):
    """Response body for ``POST /generate``."""

    # Identifier assigned by the engine when the request was queued.
    request_id: str
    # The generated completion text returned by the engine.
    text: str
    # Number of tokens produced for this request.
    tokens_generated: int


@app.post("/generate")
async def generate(request: GenerateRequest):
    """Generate a completion for ``request.prompt`` and return it.

    Drives the engine one step at a time until this request finishes,
    yielding to the event loop between steps so concurrent requests are
    not starved by the polling loop.

    Raises:
        HTTPException: 500 with the underlying error message if the
            engine fails at any point.
    """
    try:
        request_id = engine.add_request(request.prompt, request.max_tokens)

        # Snapshot the token count on every step while the request is still
        # active. The original code read active_requests only *after*
        # completion, which returns 0 whenever the engine evicts finished
        # requests from that dict.
        tokens_generated = 0

        # Wait for generation to complete.
        while True:
            results = engine.step()

            state = engine.active_requests.get(request_id)
            if state is not None:
                tokens_generated = len(state.get('generated_tokens', []))

            if request_id in results:
                return GenerateResponse(
                    request_id=request_id,
                    text=results[request_id],
                    tokens_generated=tokens_generated,
                )

            # engine.step() is synchronous: without this yield the busy
            # loop never returns control to the event loop, blocking every
            # other coroutine served by this process.
            await asyncio.sleep(0)
    except Exception as e:
        # Surface engine failures as a 500 with the original message,
        # preserving the exception chain for server-side debugging.
        raise HTTPException(status_code=500, detail=str(e)) from e


if __name__ == "__main__":
    # Serve the app on every network interface at port 8000.
    bind_host, bind_port = "0.0.0.0", 8000
    uvicorn.run(app, host=bind_host, port=bind_port)