#!/bin/bash
# Benchmark sglang serving throughput/latency across a range of concurrency
# levels and append one CSV row per level to benchres.csv.
#
# Usage: $0 [-m hf_model_dir] [-p port]

# -e: abort on error; -x: trace commands; pipefail: a failing jq/tr/sed stage
# in the result-extraction pipelines must not be masked by the final stage.
set -ex -o pipefail

# Directory where bench_serving result JSON files are stored and read back.
# NOTE(review): the old `pushd /sgl-workspace/sglang/python` is commented out,
# so the script seems to assume it is launched from this directory — confirm.
export RES_DIR_PATH=/sgl-workspace/sglang/python

model_dir=/v1/models              # value passed to --model
port=30011                        # server port for --port
num_prompts=8                     # placeholder; recomputed per concurrency level
performance_test_scale=32,64,128  # comma-separated scales (currently unused below)

while getopts "m:p:" opt; do
    case "$opt" in
        m) model_dir=$OPTARG ;;
        p) port=$OPTARG ;;
        *) echo "Usage: $0 [-m hf_model_dir] [-p port]" >&2
           exit 1
           ;;
    esac
done


# performance_test_scale is split here, but the resulting array is currently
# unused — the concurrency levels are hard-coded in the sweeps below.
# TODO(review): either drive the loops from this array or remove it.
IFS=',' read -ra substrings <<< "$performance_test_scale"

# CSV header for the results file. A total_throughput column is included so the
# header's column count matches the data rows, which emit a combined
# (input + output) throughput field between output_throughput and mean_ttft_ms.
echo 'max_concurrency,random_input_len,random_output_len,total_input_tokens,total_output_tokens,concurrency,duration,request_throughput,input_throughput,output_throughput,total_throughput,mean_ttft_ms,median_ttft_ms,std_ttft_ms,p99_ttft_ms,mean_tpot_ms,median_tpot_ms,std_tpot_ms,p99_tpot_ms,mean_itl_ms,median_itl_ms,std_itl_ms,p99_itl_ms' > benchres.csv

# Sweep low-to-high concurrency with short prompts (100 in / 1000 out).
# Each level is run 3 times; only the 3rd (warmed-up) run is recorded.
for max_concurrency in 1 8 16 32 64 128 256 512 1024; do
    num_prompts=$((2 * max_concurrency))
    for ((i = 0; i < 3; i++)); do
        # Write results under $RES_DIR_PATH so the extraction below finds
        # them regardless of the current working directory (the original
        # wrote to cwd but read from $RES_DIR_PATH).
        python3 -m sglang.bench_serving \
            --host 0.0.0.0 \
            --port "$port" \
            --backend sglang \
            --model "$model_dir" \
            --dataset-path /models/dataset/ShareGPT_V3_unfiltered_cleaned_split.json \
            --max-concurrency "$max_concurrency" \
            --output-file "$RES_DIR_PATH/subres$max_concurrency-32-$i.json" \
            --num-prompts "$num_prompts" \
            --random-input-len 100 \
            --random-output-len 1000
    done
    # Flatten the metrics of the last run into one CSV row (blank line after
    # each row, as before). (.input_throughput + .output_throughput) replaces
    # the original bare `total_throughput,total_throughput`, which is not a
    # valid jq filter (undefined function) and duplicated the column; this
    # matches the second sweep. Rows are appended to benchres.csv — the file
    # the header was written to ("$1" was never a usable filename here, since
    # getopts does not shift the option flags out of the positional args).
    jq -cr '.max_concurrency,.random_input_len,.random_output_len,.total_input_tokens,.total_output_tokens,.concurrency,.duration,.request_throughput,.input_throughput,.output_throughput,(.input_throughput + .output_throughput),.mean_ttft_ms,.median_ttft_ms,.std_ttft_ms,.p99_ttft_ms,.mean_tpot_ms,.median_tpot_ms,.std_tpot_ms,.p99_tpot_ms,.mean_itl_ms,.median_itl_ms,.std_itl_ms,.p99_itl_ms' \
        "$RES_DIR_PATH/subres$max_concurrency-32-2.json" \
        | tr '\n' ',' | sed 's/,$/\n\n/' >> benchres.csv
    # Drop the per-run result files for this concurrency level.
    rm -f -- "$RES_DIR_PATH/subres$max_concurrency-32-"{0,1,2}.json
done
# Sweep high concurrency with long prompts (1024 in / 4096 out).
# Each level is run 3 times; only the 3rd (warmed-up) run is recorded.
for max_concurrency in 50 100 200 400 800 1200 1600; do
    num_prompts=$((2 * max_concurrency))
    for ((i = 0; i < 3; i++)); do
        # The original --output-file was missing the "-$i" suffix, so all three
        # runs overwrote a single file and the -0/-1/-2 files referenced by the
        # extraction and cleanup below never existed. Also written under
        # $RES_DIR_PATH so the read-back path matches.
        python3 -m sglang.bench_serving \
            --host 0.0.0.0 \
            --port "$port" \
            --backend sglang \
            --model "$model_dir" \
            --dataset-path /models/dataset/ShareGPT_V3_unfiltered_cleaned_split.json \
            --max-concurrency "$max_concurrency" \
            --output-file "$RES_DIR_PATH/subres$max_concurrency-32-$i.json" \
            --num-prompts "$num_prompts" \
            --random-input-len 1024 \
            --random-output-len 4096
    done
    # Flatten the metrics of the last run into one CSV row (blank line after
    # each row, as before) and append to benchres.csv — the file the header
    # was written to ("$1" was never a usable filename here, since getopts
    # does not shift the option flags out of the positional args).
    jq -cr '.max_concurrency,.random_input_len,.random_output_len,.total_input_tokens,.total_output_tokens,.concurrency,.duration,.request_throughput,.input_throughput,.output_throughput,(.input_throughput + .output_throughput),.mean_ttft_ms,.median_ttft_ms,.std_ttft_ms,.p99_ttft_ms,.mean_tpot_ms,.median_tpot_ms,.std_tpot_ms,.p99_tpot_ms,.mean_itl_ms,.median_itl_ms,.std_itl_ms,.p99_itl_ms' \
        "$RES_DIR_PATH/subres$max_concurrency-32-2.json" \
        | tr '\n' ',' | sed 's/,$/\n\n/' >> benchres.csv
    # Drop the per-run result files for this concurrency level.
    rm -f -- "$RES_DIR_PATH/subres$max_concurrency-32-"{0,1,2}.json
done
# popd
