sync: eqbench-ja-run/serve_judge.sh
eqbench-ja-run/serve_judge.sh ADDED
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# =============================================================================
+# serve_judge.sh — start the vLLM server for the Qwen3.5-35B-A3B judge
+# Model: Qwen/Qwen3.5-35B-A3B (MoE: 35B total weights / 3B active params)
+# Port: 8001
+# VRAM usage: ~70GB (all weights in bf16)
+#
+# ⚠️ Memory caution:
+#   Running both servers on an A100 80GB:
+#   - TeenEmo (serve_test.sh, GPU_UTIL=0.10) + Judge (GPU_UTIL=0.88) = ~98%
+#   - If this OOMs, stop serve_test.sh before starting this server and
+#     fall back to sequential execution (follow the instructions in setup_eqbench_run.sh)
+#
+# Qwen3.5-35B-A3B is a VLM, so --language-model-only is required
+# (pipeline_tag: image-text-to-text, HF: https://huggingface.co/Qwen/Qwen3.5-35B-A3B)
+# =============================================================================
+
+set -euo pipefail
+
+JUDGE_MODEL="${JUDGE_MODEL:-Qwen/Qwen3.5-35B-A3B}"
+PORT="${JUDGE_PORT:-8001}"
+HOST="${VLLM_HOST:-0.0.0.0}"
+GPU_UTIL="${JUDGE_GPU_UTIL:-0.88}"
+MAX_MODEL_LEN="${JUDGE_MAX_MODEL_LEN:-8192}"
+DTYPE="${VLLM_DTYPE:-auto}"
+MAX_NUM_SEQS="${VLLM_MAX_NUM_SEQS:-16}"
+
+echo "=== Starting the Qwen3.5-35B-A3B judge server ==="
+echo " Model    : ${JUDGE_MODEL}"
+echo " Port     : ${PORT}"
+echo " GPU util : ${GPU_UTIL}"
+echo " ⚠️ If running alongside TeenEmo, confirm serve_test.sh uses GPU_UTIL=0.10"
+echo ""
+
+python -c "import vllm; print(f' vLLM version : {vllm.__version__}')" 2>/dev/null || true
+echo ""
+
+exec vllm serve "${JUDGE_MODEL}" \
+  --host "${HOST}" \
+  --port "${PORT}" \
+  --dtype "${DTYPE}" \
+  --gpu-memory-utilization "${GPU_UTIL}" \
+  --max-model-len "${MAX_MODEL_LEN}" \
+  --tensor-parallel-size 1 \
+  --max-num-seqs "${MAX_NUM_SEQS}" \
+  --language-model-only \
+  --enable-prefix-caching \
+  --trust-remote-code
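
The script reads all of its settings from environment variables (JUDGE_MODEL, JUDGE_PORT, JUDGE_GPU_UTIL, JUDGE_MAX_MODEL_LEN, VLLM_DTYPE, VLLM_MAX_NUM_SEQS), so the memory headroom described in the header comment can be checked and adjusted without editing the file. A minimal sketch, assuming the script is run from its own directory; the 0.80 value is only an example, not a tested setting:

    # Check current VRAM headroom while TeenEmo is already up (standard nvidia-smi query)
    nvidia-smi --query-gpu=memory.used,memory.total --format=csv

    # Launch with a smaller KV-cache budget if the ~98% combined utilization OOMs
    JUDGE_GPU_UTIL=0.80 ./serve_judge.sh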
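
Once vLLM reports it is serving, a quick smoke test over its OpenAI-compatible API confirms the judge is reachable. A sketch assuming the default host and port above; the "ping" prompt is purely illustrative:

    # List the served model (should show Qwen/Qwen3.5-35B-A3B)
    curl -s http://localhost:8001/v1/models

    # One-shot request through the OpenAI-compatible chat endpoint
    curl -s http://localhost:8001/v1/chat/completions \
      -H "Content-Type: application/json" \
      -d '{"model": "Qwen/Qwen3.5-35B-A3B", "messages": [{"role": "user", "content": "ping"}], "max_tokens": 8}'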