#!/bin/bash
# Launch a vLLM OpenAI-compatible API server (Llama-2-70B, TP=8, MUSA device)
# in the background, then print the benchmark command to run against it.
# The server is deliberately left running so the printed
# benchmark_serving_jd.py command can be executed manually.
set -euo pipefail

readonly MTT_DIR=/data/zhaojiang.xue/MT-Transformer
readonly PORT=8111
readonly MODEL_PATH=/data/mtt/model_convert/llama-2-70b-chat-hf-fp16-convert-tp8-new/
# DATASET_NAME=sharegpt
# DATASET_PATH=/data/mtt/dataset/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json
readonly DATASET_NAME=jd_test
readonly DATASET_PATH=/data/zhaojiang.xue/vllm_mtt/xmtt/jd/gsm.jsonl

# Server flags as an array: each flag stays its own word without relying on
# unquoted word-splitting of a flat string (shellcheck SC2086).
API_SERVER_ARGS=(
  --device musa
  --tensor-parallel-size 8
  -pp 1
  --block-size 64
  --max-num-seqs 128
  --max-model-len 1024
  --disable-log-stats
  --disable-log-requests
  --gpu-memory-utilization 0.95
)

export PYTHONPATH=.:${MTT_DIR}/python

# Start the API server in the background and remember its PID so it can be
# killed later (kill line below is intentionally commented out).
python -m vllm.entrypoints.openai.api_server \
  --port "${PORT}" \
  --model "${MODEL_PATH}" \
  --trust-remote-code \
  "${API_SERVER_ARGS[@]}" &
server_pid=$!
# Optional readiness probe — enable to block until the server answers:
# timeout 60 bash -c "until curl http://localhost:${PORT}/v1/models; do sleep 1; done" || exit 1
echo "server_pid: ${server_pid}"
echo "export PYTHONPATH=.:${MTT_DIR}/python"
echo "python ./benchmarks/benchmark_serving_jd.py --backend vllm --model ${MODEL_PATH} --port ${PORT} --dataset-name ${DATASET_NAME} --dataset-path ${DATASET_PATH} --num-prompts 200 --request-rate 3.0 --disable-tqdm"
# kill $server_pid