#!/bin/bash
#
# Run sglang serving benchmarks over a grid of (concurrency, input/output
# length, prompt count) settings and aggregate per-run metrics into one CSV.
#
# Usage: $0 [-m hf_model_dir] [-p port] <output_csv>
# Requires: python3 with sglang installed, jq.

# -e: abort on error; -x: trace commands; -u: error on unset variables;
# pipefail: fail a pipeline if any stage fails (so jq errors are not
# masked by the tr/sed stages downstream).
set -exuo pipefail

# Directory where per-run JSON result files are written (and later removed).
export RES_DIR_PATH=/data1/jzh/p2p

# Default parameters (overridable via -m / -p).
model_dir=/models/DeepSeek-R1
port=8080

# Benchmark matrix. Each entry holds four space-separated fields:
#   max_concurrency  input_len  output_len  num_prompts
test_cases=(
  '128 512 500 500'
  '256 512 400 500'
  '512 512 300 500'
  '1024 512 200 500'
)

# usage: print the usage line to stderr and exit non-zero.
usage() {
    echo "Usage: $0 [-m hf_model_dir] [-p port] <output_csv>" >&2
    exit 1
}

# Parse command-line options.
#   -m  HuggingFace model directory (also passed as the tokenizer path)
#   -p  port of the server under test
while getopts "m:p:" opt; do
    case $opt in
        m) model_dir=$OPTARG ;;
        p) port=$OPTARG ;;
        *) usage ;;
    esac
done
shift $((OPTIND - 1))

# The last positional argument is the output CSV path; diagnostics go to
# stderr so they never pollute redirected output.
if [ $# -lt 1 ]; then
    echo "Error: Please specify output CSV file as the last argument" >&2
    usage
fi
output_csv=${!#}

# Write the CSV header row (truncates any previous file contents; the
# column order must match the per-metric extraction performed below).
printf '%s\n' 'max_concurrency,random_input_len,random_output_len,total_input_tokens,concurrency,duration,request_throughput,input_throughput,output_throughput,total_throughput,median_ttft_ms,std_ttft_ms,p99_ttft_ms,mean_tpot_ms,median_tpot_ms,std_tpot_ms,p99_tpot_ms,median_itl_ms,std_itl_ms,p99_itl_ms,total_output_tokens,mean_ttft_ms,mean_itl_ms' > "$output_csv"

# Run every entry of the benchmark matrix.
# NOTE: the loop variable was renamed from `case` (a shell reserved word,
# legal here but confusing) to `tc`.
for tc in "${test_cases[@]}"; do
    # Unpack the four space-separated fields of this test case.
    IFS=' ' read -r max_concurrency input_len output_len num_prompts <<< "$tc"

    echo "Starting test case: max_concurrency=$max_concurrency, input_len=$input_len, output_len=$output_len, num_prompts=$num_prompts"

    # Run each configuration 3 times; every run writes its own JSON result
    # file, keyed by the test-case parameters plus the run index.
    for ((i=0; i<3; i++)); do
        python3 -m sglang.bench_serving \
            --host 0.0.0.0 \
            --port "$port" \
            --backend sglang \
            --model "$model_dir" \
            --tokenizer "$model_dir" \
            --dataset-name random \
            --dataset-path /data1/dataset/ShareGPT_V3_unfiltered_cleaned_split.json \
            --max-concurrency "$max_concurrency" \
            --output-file "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-${i}.json" \
            --num-prompts "$num_prompts" \
            --random-input-len "$input_len" \
            --random-output-len "$output_len"
    done

    # Flatten each JSON result into one CSV row: jq prints one metric per
    # line, tr joins the lines with commas, and sed turns the trailing comma
    # into a newline. Column order must match the CSV header written above.
    for result_file in "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-"*.json; do
        # Guard against an unmatched glob: without this, the literal pattern
        # would be handed to jq as a (nonexistent) filename.
        [ -e "$result_file" ] || continue
        jq -cr '
            .max_concurrency,
            .random_input_len,
            .random_output_len,
            .total_input_tokens,
            .concurrency,
            .duration,
            .request_throughput,
            .input_throughput,
            .output_throughput,
            (.input_throughput + .output_throughput),
            .median_ttft_ms,
            .std_ttft_ms,
            .p99_ttft_ms,
            .mean_tpot_ms,
            .median_tpot_ms,
            .std_tpot_ms,
            .p99_tpot_ms,
            .median_itl_ms,
            .std_itl_ms,
            .p99_itl_ms,
            .total_output_tokens,
            .mean_ttft_ms,
            .mean_itl_ms
        ' "$result_file" |
        tr '\n' ',' |
        sed 's/,$/\n/' >> "$output_csv"
    done

    # Remove this test case's intermediate JSON result files.
    rm -f "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-"*.json
done