from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

from ipex_models.qwen2_chat_xpu import Qwen2ChatModel

# FastAPI application instance.
app = FastAPI()

# Location of the model checkpoint on the local filesystem.
# NOTE(review): hard-coded Windows path — consider reading this from an
# environment variable or config file for deployment.
model_path = r"C:\models\Qwen\Qwen2.5-7B-Instruct"

# Load the chat model once at import time (wrapper appears XPU-backed,
# per the `ipex_models.qwen2_chat_xpu` module path — confirm).
qwen2_chat_model = Qwen2ChatModel(model_name=model_path)


# OpenAI-style request schema for /v1/completions.
class CompletionRequest(BaseModel):
    """Request body for the completions endpoint.

    Mirrors a subset of the OpenAI completions API.
    """

    # Prompt text to complete.
    prompt: str
    # Generation budget; currently not forwarded to the model (see endpoint).
    max_tokens: int = 512
    # Whether to stream tokens back; streaming is not implemented yet.
    stream: bool = False
    # Model selector. The endpoint reads `request.model_name`, but the
    # original schema never declared the field, so every request crashed
    # with an AttributeError. Defaulting to the one loaded model keeps
    # existing clients (which omit the field) working.
    model_name: str = "Qwen2.5-7B-Instruct"


# OpenAI-compatible inference endpoint.
@app.post("/v1/completions")
async def create_completion(request: CompletionRequest):
    """Generate a completion for ``request.prompt``.

    Returns an OpenAI-style response dict with ``choices`` and ``usage``.
    Raises:
        HTTPException 400: a model other than the loaded one is requested.
        HTTPException 501: ``stream=true`` is requested (not implemented).
    """
    # The original dispatched on ``request.model_name`` with two dead
    # ``pass`` branches — and the field did not even exist on the schema,
    # so every request crashed. Only one model is loaded here, so reject
    # anything else explicitly. ``getattr`` keeps this safe whether or not
    # the schema declares the field.
    requested = getattr(request, "model_name", None)
    if requested is not None and requested != "Qwen2.5-7B-Instruct":
        raise HTTPException(status_code=400, detail=f"unknown model: {requested}")

    if request.stream:
        # Previously this silently returned None (HTTP 200 with a null
        # body); fail loudly until streaming is actually implemented.
        raise HTTPException(status_code=501, detail="streaming is not supported")

    response = qwen2_chat_model.chat(request.prompt)
    return {
        "choices": [{"text": response}],
        "usage": {
            # TODO: wire real token counts / latency from the model wrapper.
            "generated_tokens": 0,
            "elapsed_time": 0,
        },
    }


# Development entry point: serve the app with uvicorn.
if __name__ == "__main__":
    import uvicorn

    bind_host, bind_port = "0.0.0.0", 8000
    uvicorn.run(app, host=bind_host, port=bind_port)

"""
curl -X POST "http://127.0.0.1:8000/v1/completions" \
-H "Content-Type: application/json" \
-d '{
  "prompt": "用python实现快排!",
  "max_tokens": 512,
  "stream": false
}'

curl -X POST "http://127.0.0.1:8000/v1/completions" \
-H "Content-Type: application/json" \
-d '{
  "prompt": "用python实现快排!",
  "max_tokens": 512,
  "stream": true
}'
"""
