# app.py
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from vllm import LLM, SamplingParams
import uvicorn

app = FastAPI(title="Qwen2.5-3B API")


# Model configuration
class ModelConfig:
    """Holds the model path and the lazily initialised vLLM engine handle."""

    def __init__(self):
        # Path to the local model weights — replace with the actual path.
        self.model_path = "/home/featurize/work/models/Qwen2.5-3B-Instruct"
        # Populated by the startup hook once the engine has been loaded;
        # stays None until then.
        self.llm = None


# Single shared configuration/engine holder for the whole app.
model_config = ModelConfig()


# Request/response models
class ChatRequest(BaseModel):
    """Request body for POST /v1/chat/completions."""
    prompt: str                # user prompt, passed verbatim to the engine
    temperature: float = 0.7   # sampling temperature
    top_p: float = 0.9         # nucleus-sampling probability mass
    max_tokens: int = 1024     # upper bound on generated tokens


class ChatResponse(BaseModel):
    """Response body: generated text plus token accounting."""
    response: str          # generated completion text
    prompt_tokens: int     # number of tokens in the tokenized prompt
    generated_tokens: int  # number of tokens produced by the engine


# Initialize the model
@app.on_event("startup")
async def startup_event():
    """Load the vLLM engine once at startup and store it on ``model_config``.

    Raises:
        RuntimeError: if the model fails to load; this aborts application
            startup instead of leaving the service up without a model.
    """
    try:
        model_config.llm = LLM(
            model=model_config.model_path,
            tensor_parallel_size=1  # adjust to the number of available GPUs
        )
        print("模型加载成功")
    except Exception as e:
        print(f"模型加载失败: {e}")
        # HTTPException only makes sense inside a request handler; during
        # startup there is no request, so it would never become an HTTP 500.
        # Raise a plain error (chained to the cause) to abort startup cleanly.
        raise RuntimeError("模型初始化失败") from e


# Chat endpoint
@app.post("/v1/chat/completions", response_model=ChatResponse)
async def chat_completion(request: ChatRequest):
    """Generate a completion for ``request.prompt`` with the loaded engine.

    Args:
        request: prompt plus sampling parameters (temperature, top_p,
            max_tokens).

    Returns:
        ChatResponse with the generated text and prompt/output token counts.

    Raises:
        HTTPException: 503 if the model has not been loaded yet,
            500 if generation fails.
    """
    # Guard against requests that arrive before the startup hook finished
    # (or after it failed): previously this surfaced as an AttributeError
    # wrapped in an opaque 500.
    if model_config.llm is None:
        raise HTTPException(status_code=503, detail="模型未加载")

    try:
        sampling_params = SamplingParams(
            temperature=request.temperature,
            top_p=request.top_p,
            max_tokens=request.max_tokens
        )

        # NOTE(review): llm.generate is a blocking call inside an async
        # handler, so it stalls the event loop for the whole generation;
        # consider fastapi.concurrency.run_in_threadpool if throughput
        # matters — left unchanged here.
        outputs = model_config.llm.generate(request.prompt, sampling_params)
        output = outputs[0]

        return ChatResponse(
            response=output.outputs[0].text,
            prompt_tokens=len(output.prompt_token_ids),
            generated_tokens=len(output.outputs[0].token_ids)
        )
    except HTTPException:
        # Let deliberate HTTP errors propagate instead of wrapping them
        # into a generic 500 below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"生成响应时出错: {str(e)}") from e


if __name__ == "__main__":
    # Serve the API on all interfaces, port 8000 (development entry point).
    uvicorn.run(app, host="0.0.0.0", port=8000)