#!/bin/bash
#
# Sweep sglang serving benchmarks over a matrix of
# (max_concurrency, input_len, output_len) test cases and aggregate the
# final run of each case into a CSV file.
#
# Usage: $0 [-m hf_model_dir] [-p port] <output_csv>
# Env:   RES_DIR_PATH - directory for intermediate per-run JSON results
#                       (default: /data1/jzh/p2p; may be preset by caller)

# -e: abort on error; -u: error on unset variables; -x: trace commands;
# pipefail: fail a pipeline when any stage fails (matters for the jq
# extraction pipeline below, whose errors were silently dropped before).
set -euxo pipefail

# Keep the historical default, but let the environment override it.
export RES_DIR_PATH=${RES_DIR_PATH:-/data1/jzh/p2p}

# Default parameters (overridable via -m / -p command-line options).
model_dir=/data1/Qwen3-235B-A22B-FP8-Block64
port=8080

# Test-case matrix: each entry is "max_concurrency input_len output_len".
# Edit this list to customize the benchmark sweep.
test_cases=(
  "300 4096 256"    # case 1: concurrency 300, input 4096,  output 256
  "150 8192 512"    # case 2: concurrency 150, input 8192,  output 512
  "40 40960 2048"   # case 3: concurrency 40,  input 40960, output 2048
  "92 16384 1024"   # case 4: concurrency 92,  input 16384, output 1024
  "150 4096 512"    # case 5: concurrency 150, input 4096,  output 512
  "150 16384 2048"  # case 6: concurrency 150, input 16384, output 2048
)

# Print usage to stderr (diagnostics never belong on stdout).
usage() {
    echo "Usage: $0 [-m hf_model_dir] [-p port] <output_csv>" >&2
}

# Parse command-line options. Leading ':' enables silent error reporting so
# we can emit our own messages and distinguish a missing option argument
# (':' case) from an unknown flag ('?' / default case).
while getopts ":m:p:" opt; do
    case $opt in
        m) model_dir=$OPTARG ;;
        p) port=$OPTARG ;;
        :) echo "Error: option -$OPTARG requires an argument" >&2
           usage
           exit 1
           ;;
        *) usage
           exit 1
           ;;
    esac
done

# Require an output CSV file after the options; the last positional
# argument is used (matches the ${!#} lookup below).
if [ "$OPTIND" -gt $# ]; then
    echo "Error: Please specify output CSV file as the last argument" >&2
    usage
    exit 1
fi
output_csv=${!#}

# Write the CSV header (truncates/overwrites any existing file).
echo 'max_concurrency,random_input_len,random_output_len,total_input_tokens,total_output_tokens,concurrency,duration,request_throughput,input_throughput,output_throughput,total_throughput,mean_ttft_ms,median_ttft_ms,std_ttft_ms,p99_ttft_ms,mean_tpot_ms,median_tpot_ms,std_tpot_ms,p99_tpot_ms,mean_itl_ms,median_itl_ms,std_itl_ms,p99_itl_ms' > "$output_csv"

# Run every test case and append the final run's metrics as one CSV row.
# NOTE: the loop variable is deliberately not named 'case' (reserved word).
for test_case in "${test_cases[@]}"; do
    # Split "max_concurrency input_len output_len" into its three fields.
    IFS=' ' read -r max_concurrency input_len output_len <<< "$test_case"

    echo "Starting test case: max_concurrency=$max_concurrency, input_len=$input_len, output_len=$output_len"

    # Run each case num_runs times; only the last run (warmed-up server)
    # is recorded in the CSV below.
    num_runs=3
    for ((i = 0; i < num_runs; i++)); do
        python3 -m sglang.bench_serving \
            --host 0.0.0.0 \
            --port "$port" \
            --backend sglang \
            --model "$model_dir" \
            --tokenizer "$model_dir" \
            --dataset-name random \
            --dataset-path /data1/dataset/ShareGPT_V3_unfiltered_cleaned_split.json \
            --max-concurrency "$max_concurrency" \
            --output-file "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${i}.json" \
            --num-prompts "$max_concurrency" \
            --random-input-len "$input_len" \
            --random-output-len "$output_len"
    done

    # Extract metrics from the last run and append one CSV row.
    # The row is assembled inside jq (map(tostring) | join(",")) rather than
    # via `tr '\n' ',' | sed 's/,$/\n/'`: that post-processing depended on
    # GNU sed interpreting '\n' in the replacement text and produced a
    # malformed row terminator under BSD sed. Field order must match the
    # CSV header written above.
    result_file="${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-$((num_runs - 1)).json"
    if [ -f "$result_file" ]; then
        jq -r '[.max_concurrency,
                .random_input_len,
                .random_output_len,
                .total_input_tokens,
                .total_output_tokens,
                .concurrency,
                .duration,
                .request_throughput,
                .input_throughput,
                .output_throughput,
                (.input_throughput + .output_throughput),
                .mean_ttft_ms,
                .median_ttft_ms,
                .std_ttft_ms,
                .p99_ttft_ms,
                .mean_tpot_ms,
                .median_tpot_ms,
                .std_tpot_ms,
                .p99_tpot_ms,
                .mean_itl_ms,
                .median_itl_ms,
                .std_itl_ms,
                .p99_itl_ms] | map(tostring) | join(",")' "$result_file" >> "$output_csv"
    else
        echo "Warning: Result file $result_file not found, skipping..." >&2
    fi

    # Remove this case's intermediate per-run JSON files.
    rm -f "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-"*.json
done