# model-service/start_service.py
import asyncio
import threading
import uuid

from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from vllm import EngineArgs, LLMEngine, SamplingParams

# FastAPI application instance.
# NOTE(review): CORSMiddleware is imported above but never registered via
# app.add_middleware(...) — confirm whether cross-origin access is needed.
app = FastAPI(title="SHEIN Model Service")

# Initialize the vLLM engine at import time (blocks until model weights load).
engine_args = EngineArgs(
    model="/app/models/Qwen-7B-Chat",  # local model path baked into the image
    tensor_parallel_size=2,  # shards the model across 2 GPUs — requires 2 visible devices
    gpu_memory_utilization=0.8,  # leave ~20% GPU memory headroom
    max_model_len=32768,
    quantization="awq",  # AWQ quantization — assumes the checkpoint is AWQ-quantized; verify
    # NOTE(review): some vLLM versions reject max_num_batched_tokens (2048)
    # < max_model_len (32768) unless chunked prefill is enabled — confirm
    # against the pinned vLLM version.
    max_num_batched_tokens=2048
)
# LLMEngine is vLLM's *synchronous* engine (add_request/step API) — it has no
# awaitable generate(); callers must not `await` its methods.
engine = LLMEngine.from_engine_args(engine_args)


# The synchronous LLMEngine must be driven by exactly one thread at a time:
# add_request()/step() mutate shared scheduler state.
_engine_lock = threading.Lock()


def _run_generation(prompt: str, sampling_params: SamplingParams):
    """Drive the synchronous vLLM engine to completion for one prompt.

    Returns the finished RequestOutput for this request. Runs under
    _engine_lock so concurrent HTTP requests cannot interleave step() calls.
    """
    request_id = str(uuid.uuid4())  # vLLM requires a unique id per request
    with _engine_lock:
        engine.add_request(request_id, prompt, sampling_params)
        final_output = None
        while engine.has_unfinished_requests():
            for output in engine.step():
                if output.request_id == request_id and output.finished:
                    final_output = output
    return final_output


@app.post("/v1/generate")
async def generate_text(request: dict):
    """Text generation endpoint.

    Request body: ``{"prompt": str (required), "temperature": float,
    "top_p": float, "max_tokens": int}`` — sampling fields are optional.
    Returns the generated text plus prompt/completion token counts.
    Raises HTTP 400 when "prompt" is missing or empty.
    """
    prompt = request.get("prompt")
    if not prompt:
        # Previously a missing key raised KeyError (HTTP 500); report a
        # proper client error instead.
        raise HTTPException(status_code=400, detail="'prompt' is required")

    sampling_params = SamplingParams(
        temperature=request.get("temperature", 0.7),
        top_p=request.get("top_p", 0.9),
        max_tokens=request.get("max_tokens", 512),
    )

    # Bug fix: LLMEngine is the *synchronous* engine — it has no awaitable
    # generate(), so the original `await engine.generate(...)` failed at
    # runtime. Drive it via add_request/step in a worker thread so the event
    # loop stays responsive. (For true concurrent batching, consider
    # migrating to AsyncLLMEngine.)
    result = await asyncio.to_thread(_run_generation, prompt, sampling_params)

    completion = result.outputs[0]
    return {
        "text": completion.text,
        "usage": {
            "prompt_tokens": len(result.prompt_token_ids),
            "completion_tokens": len(completion.token_ids),
        },
    }


if __name__ == "__main__":
    import os

    import uvicorn

    # Bind address/port are overridable via environment variables for
    # deployment flexibility; the defaults preserve the original behavior
    # (listen on all interfaces, port 8000).
    uvicorn.run(
        app,
        host=os.environ.get("SERVICE_HOST", "0.0.0.0"),
        port=int(os.environ.get("SERVICE_PORT", "8000")),
    )