from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama
import asyncio

app = FastAPI()
# Load the local GGUF model once at import time; the single Llama instance is
# shared by every request handler in this module.
# NOTE(review): llama_cpp.Llama is not documented as thread-safe — concurrent
# requests hitting this shared instance may interleave; confirm or serialize.
llm = Llama(model_path="../../llama.cpp/deepseek-r1-1.5b.q4_k_m.gguf",
            n_gpu_layers=20,
            n_ctx=2048,
            n_threads=8,  # adjust to the number of CPU cores
            clblast_device=0  # use the first OpenCL device
            )

class ChatRequest(BaseModel):
    """Request body for the /async_chat endpoint.

    Validated by pydantic from the incoming JSON payload.
    """

    # Chat history; presumably OpenAI-style {"role": ..., "content": ...}
    # dicts, as expected by llama_cpp's create_chat_completion — TODO confirm.
    messages: list[dict]
    # Maximum number of tokens to generate for the reply.
    max_tokens: int = 100
    # Sampling temperature passed straight through to the model.
    temperature: float = 0.7

@app.post("/async_chat")
async def async_chat(request: ChatRequest):
    """Run a chat completion on the shared model without blocking the event loop.

    The (CPU/GPU-bound, synchronous) llama_cpp inference call is offloaded to
    the default thread-pool executor so other requests can be served while it
    runs.

    Args:
        request: Validated chat payload (messages, max_tokens, temperature).

    Returns:
        dict: ``{"response": <assistant message content>}`` extracted from the
        first choice of the model's completion result.
    """
    # asyncio.get_event_loop() is deprecated inside a running coroutine;
    # asyncio.to_thread (3.9+) is the idiomatic replacement for
    # run_in_executor(None, lambda: ...) and forwards kwargs directly.
    response = await asyncio.to_thread(
        llm.create_chat_completion,
        messages=request.messages,
        max_tokens=request.max_tokens,
        temperature=request.temperature,
    )
    return {"response": response["choices"][0]["message"]["content"]}
