# deploy_model.py
import argparse
import os
import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Dict, Any
from vllm import LLM, SamplingParams

app = FastAPI(title="LLM Inference Service")

# Global LLM engine instance; set by main() before uvicorn starts serving.
# Remains None until the model finishes loading.
llm = None


class InferenceRequest(BaseModel):
    # Request body schema for the /generate endpoint.
    prompt: str  # text prompt to complete
    temperature: float = 0.7  # sampling temperature forwarded to SamplingParams
    max_tokens: int = 1024  # maximum number of tokens to generate


def main():
    """Parse CLI arguments, initialize the global vLLM engine, and serve the API.

    Side effects: sets the module-level ``llm`` global, may set
    CUDA_VISIBLE_DEVICES, and blocks in ``uvicorn.run`` until shutdown.
    """
    global llm
    parser = argparse.ArgumentParser(description='Deploy LLM with vLLM')
    # Core parameters
    parser.add_argument('--model', type=str, required=True, help="模型本地路径或Hugging Face仓库ID")
    parser.add_argument('--port', type=int, default=8000, help="服务端口")
    parser.add_argument('--host', type=str, default='0.0.0.0', help="绑定主机地址（0.0.0.0允许外部访问）")
    # Optional parameters
    parser.add_argument('--cache-dir', type=str, default=None, help="模型缓存目录")
    parser.add_argument('--tensor-parallel-size', type=int, default=1, help="GPU并行数量")
    parser.add_argument('--gpu-memory-utilization', type=float, default=0.9, help="GPU内存利用率（0.0-1.0）")
    parser.add_argument('--max-model-len', type=int, default=4096, help="模型最大序列长度")
    parser.add_argument('--use-cpu', action='store_true', help="强制使用CPU运行")
    parser.add_argument('--quantization', type=str, default=None, help="量化方式（如awq、gptq-4bit）")
    # BUG FIX: args.gpu and args.trust_remote_code were read below but never
    # declared, so every invocation crashed with AttributeError.
    parser.add_argument('--gpu', type=str, default=None,
                        help="CUDA_VISIBLE_DEVICES value, e.g. '0' or '0,1'")
    parser.add_argument('--trust-remote-code', action='store_true',
                        help="Allow execution of custom model code from the hub")
    args = parser.parse_args()

    # Restrict visible GPUs; must happen before the engine initializes CUDA.
    if args.gpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        print(f"Using GPUs: {args.gpu}")

    # Device-specific engine kwargs.
    if args.use_cpu:
        kwargs = {
            "dtype": torch.float32,  # CPU does not support half precision
        }
        print("Running model on CPU")
    else:
        kwargs = {}
        print(f"Running model on GPU with {args.tensor_parallel_size} devices")

    # Initialize the engine. BUG FIX: gpu_memory_utilization, max_model_len
    # and quantization were previously parsed but silently ignored.
    llm = LLM(
        model=args.model,
        tensor_parallel_size=args.tensor_parallel_size,
        trust_remote_code=args.trust_remote_code,
        download_dir=args.cache_dir,  # model download/cache directory
        gpu_memory_utilization=args.gpu_memory_utilization,
        max_model_len=args.max_model_len,
        quantization=args.quantization,
        **kwargs
    )

    # Start the API server. BUG FIX: honor --host instead of hardcoding 0.0.0.0.
    print(f"Model {args.model} loaded. Serving on port {args.port}")
    import uvicorn
    uvicorn.run(app, host=args.host, port=args.port)


@app.post("/generate")
async def generate(request: InferenceRequest):
    """Generate a completion for the given prompt with the global vLLM engine.

    Returns a dict with the original prompt and the first candidate's text.
    Raises HTTP 503 while the model is still loading, HTTP 500 on engine errors.
    """
    # Robustness fix: until main() finishes loading, llm is None and the old
    # code surfaced an opaque AttributeError wrapped as a 500.
    if llm is None:
        raise HTTPException(status_code=503, detail="Model is not loaded yet")

    try:
        sampling_params = SamplingParams(
            temperature=request.temperature,
            max_tokens=request.max_tokens,
        )

        # llm.generate returns one RequestOutput per prompt; each carries a
        # list of candidate completions — we return the first.
        outputs = llm.generate(
            request.prompt,
            sampling_params,
        )

        return {
            "prompt": request.prompt,
            "generated_text": outputs[0].outputs[0].text,
        }
    except Exception as e:
        # Boundary handler: convert engine failures into an HTTP 500.
        raise HTTPException(status_code=500, detail=str(e))


# Script entry point: parse args, load the model, and serve until shutdown.
if __name__ == "__main__":
    main()