#!/bin/bash
# Launch a vLLM OpenAI-compatible API server for the xiaoxingv3 model in the
# background. Stdout/stderr go to the log file; the background PID is echoed.
#
# Historical invocations kept for reference:
# # VLLM_WORKER_MULTIPROC_METHOD=spawn \
# #     vllm serve /data/nvme0n1p1/opt/zzg-cx/xiaoxing/qwen2.5-7b-merged --trust-remote-code --served-model-name qwen2_5 --gpu-memory-utilization 0.98 --tensor-parallel-size 2  --port 8080 --max-model-len 2048 --max-num-seqs 256
# NCCL_CUMEM_ENABLE=1 CUDA_VISIBLE_DEVICES=7 nohup vllm serve /data/nvme0n1p1/opt/zzg-cx/xiaoxing/qwen2.5-7b-merged --trust-remote-code --served-model-name xiaoxingv2 --gpu-memory-utilization 0.5 --port 8006 --max-model-len 2048 --max-num-seqs 256 --distributed-executor-backend ray > vllm.log 2>&1 &

set -euo pipefail

# Overridable settings — defaults match the previous hard-coded values, so
# running this script with no environment set behaves exactly as before.
MODEL_PATH=${MODEL_PATH:-/data/nvme0n1p1/opt/zzg-cx/xiaoxing/Qwen2.5-7B-Instruct_merged_0820}
SERVED_NAME=${SERVED_NAME:-xiaoxingv3}
PORT=${PORT:-8007}
GPU_ID=${GPU_ID:-6}                 # use GPU #6 by default
LOG_FILE=${LOG_FILE:-vllm_v3.log}

export NCCL_CUMEM_ENABLE=1
export CUDA_VISIBLE_DEVICES="$GPU_ID"

# Fail fast with a clear message instead of burying "command not found"
# inside the nohup log file.
command -v vllm > /dev/null 2>&1 || { echo "error: 'vllm' not found in PATH" >&2; exit 1; }

# Sanity-check the local model directory before launching.
[[ -d "$MODEL_PATH" ]] || { echo "error: model path not found: $MODEL_PATH" >&2; exit 1; }

# # Uncomment to refuse starting a second concurrent server instance:
# if pgrep -f "vllm serve" > /dev/null; then
#     echo "vLLM is already running"
#     exit 1
# fi

# Start vLLM in the background, detached from the terminal.
nohup vllm serve \
  "$MODEL_PATH" \
  --trust-remote-code \
  --served-model-name "$SERVED_NAME" \
  --host 0.0.0.0 \
  --port "$PORT" \
  --gpu-memory-utilization 0.95 \
  --max-model-len 2048 \
  --max-num-seqs 64 \
  --swap-space 16 \
  --disable-log-stats \
  --block-size 16 \
  --max-num-batched-tokens 4096 \
  --tensor-parallel-size 1 \
  > "$LOG_FILE" 2>&1 &

echo "vLLM started with PID: $!"