#!/bin/bash
# Benchmark driver: runs sglang.bench_serving against a serving endpoint for a
# matrix of (input_len, output_len, max_concurrency, num_prompts) test cases,
# then aggregates the per-run JSON results into a single CSV.
# -e: abort on the first failing command; -x: trace every command (debug aid).
set -ex

#######################################
# Ensure the jq JSON processor is available, installing it with the
# system package manager when missing.
# Globals:   sources /etc/os-release (sets ID etc. in the current shell)
# Outputs:   progress / error messages to stdout
# Returns:   0 when jq is (or becomes) available; exits 1 otherwise
#######################################
check_jq() {
    if command -v jq &> /dev/null; then
        return 0
    fi
    echo "jq is not installed. Installing jq..."
    # Detect the distribution to pick the right package manager.
    if [ -f /etc/os-release ]; then
        . /etc/os-release
        if [ "$ID" = "ubuntu" ] || [ "$ID" = "debian" ]; then
            # Refresh the package index first: on fresh containers the
            # index is empty and a bare 'apt-get install' fails.
            apt-get update -y > /dev/null
            apt-get install -y jq > /dev/null
        elif [ "$ID" = "centos" ] || [ "$ID" = "rhel" ] || [ "$ID" = "rocky" ]; then
            yum install -y jq > /dev/null
        else
            echo "Unsupported OS. Please install jq manually and try again."
            exit 1
        fi
    else
        echo "Cannot determine OS. Please install jq manually and try again."
        exit 1
    fi
    # Verify the binary is actually usable rather than trusting the
    # package manager's exit status alone.
    if ! command -v jq &> /dev/null; then
        echo "jq installation failed. Please install jq manually and try again."
        exit 1
    fi
    echo "jq installed successfully"
}

# Destination directory for result files and logs
export RES_DIR_PATH=/data2/jzh/p2pshare/sglang_v0.5.5.post2_normorl

# Default parameters (model/port overridable via -m / -p)
model_dir=/data1/Qwen3-32B-FP8
datasets_path=/data1/datasets/ShareGPT_V3_unfiltered_cleaned_split.json
port=8888

# Test cases — each entry is "input_len output_len max_concurrency num_prompts"
test_cases=(
    "128 256 1 1"
    "128 256 10 10"
    "128 256 20 20"
    "128 256 50 50"
    "128 256 100 100"
    "128 256 110 110"
    "128 256 120 120"
    "128 256 130 130"
    "128 256 140 140"
    "128 256 150 150"
    "128 256 160 160"
    "128 256 170 170"
    "128 256 180 180"
    "128 256 200 200"
    "512 512 1 1"
    "512 512 10 10"
    "512 512 20 20"
    "512 512 50 50"
    "512 512 100 100"
    "512 512 110 110"
    "512 512 120 120"
    "512 512 130 130"
    "512 512 140 140"
    "512 512 150 150"
    "512 512 160 160"
    "512 512 170 170"
    "512 512 180 180"
    "512 512 200 200"
    "128 256 10 50"
    "128 256 20 100"
    "128 256 50 250"
    "128 256 80 400"
    "128 256 100 500"
    "128 256 120 500"
    "128 256 150 500"
    "128 256 160 500"
    "128 256 170 500"
    "128 256 180 500"
    "128 256 200 1000"
    "512 512 10 50"
    "512 512 20 100"
    "512 512 50 250"
    "512 512 100 500"
    "512 512 120 500"
    "512 512 150 800"
    "512 512 160 500"
    "512 512 170 500"
    "512 512 180 500"
    "512 512 200 1000"
)
# Debug cases (uncomment when needed)
# test_cases=(
    # "128 256 1 1"
    # "128 256 10 10"
    # "128 256 20 20"
    # "128 256 50 50"
    # "128 256 100 100"
    # "128 256 150 150"
    # "128 256 200 200"
    # "512 512 1 1"
    # "512 512 10 10"
    # "512 512 20 20"
    # "512 512 50 50"
    # "512 512 100 100"
    # "512 512 150 150"
    # "512 512 200 200"
    # "128 256 10 50"
    # "128 256 20 100"
    # "128 256 50 250"
    # "128 256 100 500"
    # "128 256 150 800"
    # "128 256 200 1000"
    # "512 512 10 50"
    # "512 512 20 100"
    # "512 512 50 250"
    # "512 512 100 500"
    # "512 512 150 800"
    # "512 512 200 1000"
# )


# ---- Command-line argument parsing ----
# -m  model directory (HF format); -p  server port.
# The final positional argument names the output CSV file.
while getopts "m:p:" flag; do
    case "$flag" in
        m) model_dir=$OPTARG ;;
        p) port=$OPTARG ;;
        *)
            echo "Usage: $0 [-m hf_model_dir] [-p port] <output_csv>"
            exit 1
            ;;
    esac
done

# A positional argument must remain after the parsed options.
if [ "$OPTIND" -gt "$#" ]; then
    echo "Error: Please specify output CSV file as the last argument"
    echo "Usage: $0 [-m hf_model_dir] [-p port] <output_csv>"
    exit 1
fi
output_csv=${!#}

# Relative output paths are placed under RES_DIR_PATH by default.
case "$output_csv" in
    /*) ;;  # already absolute — leave untouched
    *)  output_csv="${RES_DIR_PATH}/${output_csv}" ;;
esac

# Create the result and log directories before anything writes into them.
mkdir -p "$RES_DIR_PATH" "${RES_DIR_PATH}/logs"

# jq is required below to extract metrics from the benchmark JSON output.
check_jq

# Start the output CSV with its header row.
printf '%s\n' 'max_concurrency,num_prompts,random_input_len,random_output_len,total_throughput,mean_ttft_ms,mean_itl_ms' > "$output_csv"

# Run every test case, benchmark it three times, then aggregate the results.
for case in "${test_cases[@]}"; do
    IFS=' ' read -r input_len output_len max_concurrency num_prompts <<< "$case"

    echo "Starting test case: max_concurrency=$max_concurrency, input_len=$input_len, output_len=$output_len, num_prompts=$num_prompts"

    # Benchmark this case 3 times so the numbers can be averaged later.
    for ((i=0; i<3; i++)); do
        SYS_TIME=$(date "+%Y%m%d_%H%M%S")
        LOG="run${i}_${max_concurrency}_${input_len}_${output_len}_${num_prompts}_${SYS_TIME}"
        echo "Log file: ${RES_DIR_PATH}/logs/${LOG}.log"

        python3 -m sglang.bench_serving \
            --host 0.0.0.0 \
            --port "$port" \
            --backend vllm \
            --model "$model_dir" \
            --tokenizer "$model_dir" \
            --dataset-name random \
            --dataset-path "$datasets_path" \
            --max-concurrency "$max_concurrency" \
            --output-file "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-${i}.json" \
            --num-prompts "$num_prompts" \
            --random-input-len "$input_len" \
            --random-output-len "$output_len" \
            2>&1 | tee -a "${RES_DIR_PATH}/logs/${LOG}.log"
    done

    # Aggregate ALL three runs of this case into the CSV.
    # BUGFIX: the previous glob ('...-2*.json') only matched run index 2,
    # so two of the three runs were silently dropped despite the comment
    # claiming all runs were processed.
    for result_file in "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-"*.json; do
        if [ -f "$result_file" ]; then
            # Extract the key metrics from the benchmark JSON as one CSV row.
            jq -cr '[
                .max_concurrency,
                .completed,
                .random_input_len,
                .random_output_len,
                (.input_throughput + .output_throughput),
                .mean_ttft_ms,
                .mean_itl_ms
            ] | @csv' "$result_file" >> "$output_csv"
        fi
    done
    # One blank line per test case keeps the CSV groups readable
    # (previously emitted once per matched file, i.e. once per case).
    echo "" >> "$output_csv"

    # Remove this case's temporary JSON files before the next case.
    rm -f "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-"*.json
done

echo "All tests completed. Results saved to $output_csv"