#!/bin/bash
#
# Launch a vLLM OpenAI-compatible API server for a single model,
# logging all output to a timestamped file under $LOG_DIR.
set -euo pipefail

# Model path (adjust to your environment)
MODEL_PATH="Qwen/Qwen2.5-7B-Instruct"
MODEL_NAME="Qwen2.5-7B"

# Tensor-parallel size (set to the number of GPUs you want to shard across)
TP_SIZE=1
# Address and port the server listens on
HOST="0.0.0.0"
PORT=8080
LOG_DIR="../logs"
LOG_PREFIX="vllm_server"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
LOG_FILE="$LOG_DIR/${LOG_PREFIX}_${MODEL_NAME}_${TIMESTAMP}.log"
mkdir -p "$LOG_DIR"

# Fraction of GPU memory vLLM may use (recommended <= 0.9)
GPU_MEM_UTIL=0.8
MAX_MODEL_LEN=8192
#######################################
# Kill any process(es) listening on the given TCP port.
# Tries SIGTERM first and escalates to SIGKILL only if the
# process survives; logs every step to $LOG_FILE.
# Globals:   LOG_FILE (appended to)
# Arguments: $1 - TCP port number
#######################################
kill_port_process() {
    local port=$1
    local pids
    # lsof -t prints bare PIDs, one per line (possibly several);
    # empty output (or a missing lsof) means the port is free.
    pids=$(lsof -t -i :"$port" 2>/dev/null || true)
    if [[ -n "$pids" ]]; then
        echo "[$(date)] Port $port is occupied by PID(s): $pids. Killing process..." >> "$LOG_FILE"
        local pid
        for pid in $pids; do
            # Graceful SIGTERM first; escalate to SIGKILL only if ignored.
            if kill "$pid" 2>/dev/null; then
                sleep 1
                if kill -0 "$pid" 2>/dev/null; then
                    kill -9 "$pid" 2>/dev/null || true
                fi
                echo "[$(date)] Process PID $pid killed." >> "$LOG_FILE"
            else
                echo "[$(date)] Failed to kill PID $pid." >> "$LOG_FILE"
            fi
        done
    else
        echo "[$(date)] Port $port is free." >> "$LOG_FILE"
    fi
}
# Free the listen port before starting (kills whatever is bound to it)
kill_port_process "$PORT"
# Give the kernel a moment to release the socket
sleep 2
# Record startup information
echo "[$(date)] Starting vLLM server for model: $MODEL_NAME" >> "$LOG_FILE"
echo "[$(date)] Log file: $LOG_FILE" >> "$LOG_FILE"

# Launch the vLLM OpenAI-API-compatible server in the background;
# all stdout/stderr is appended to the log file.
python -m vllm.entrypoints.openai.api_server \
    --host "$HOST" \
    --port "$PORT" \
    --model "$MODEL_PATH" \
    --dtype bfloat16 \
    --trust-remote-code \
    --max-model-len "$MAX_MODEL_LEN" \
    --gpu-memory-utilization "$GPU_MEM_UTIL" \
    --tensor-parallel-size "$TP_SIZE" >> "$LOG_FILE" 2>&1 &

# Capture the background PID so the server can be monitored or stopped later
SERVER_PID=$!
echo "[$(date)] vLLM server launched with PID $SERVER_PID" >> "$LOG_FILE"