#!/bin/bash
# Benchmark harness: launches an sglang inference server, sweeps
# input-length x concurrency test cases with sglang.bench_serving,
# and aggregates the per-run JSON results into a single CSV summary.

# -e: abort on any command failure; -x: trace each command for debugging.
set -ex

# Ensure the jq JSON processor is available, installing it through the
# distro package manager when missing. Exits non-zero if the OS cannot
# be detected, is unsupported, or the installation did not take effect.
check_jq() {
    if command -v jq &> /dev/null; then
        return 0
    fi
    echo "jq is not installed. Installing jq..."
    # Detect the distribution from /etc/os-release ($ID).
    if [ ! -f /etc/os-release ]; then
        echo "Cannot determine OS. Please install jq manually and try again."
        exit 1
    fi
    . /etc/os-release
    if [ "$ID" = "ubuntu" ] || [ "$ID" = "debian" ]; then
        apt-get install -y jq > /dev/null
    elif [ "$ID" = "centos" ] || [ "$ID" = "rhel" ] || [ "$ID" = "rocky" ]; then
        # Newer RHEL-family releases ship dnf (yum may be absent); prefer it.
        if command -v dnf &> /dev/null; then
            dnf install -y jq > /dev/null
        else
            yum install -y jq > /dev/null
        fi
    else
        echo "Unsupported OS. Please install jq manually and try again."
        exit 1
    fi
    # Verify the install actually put jq on PATH before claiming success;
    # otherwise the script would only fail much later, mid-benchmark.
    if ! command -v jq &> /dev/null; then
        echo "jq installation failed. Please install jq manually and try again."
        exit 1
    fi
    echo "jq installed successfully"
}

# Directory that receives all results and logs for this benchmark run.
export RES_DIR_PATH=/data2/jzh/p2pshare/sglang_v0.5.5.post2_235b_hich

# Defaults, overridable via the -m / -p command-line options.
model_dir=/data2/Qwen3-235B-A22B-FP8-Block64
datasets_path=/data2/datasets/ShareGPT_V3_unfiltered_cleaned_split.json
port=8888

# Benchmark sweep: every input length is paired with every concurrency level.
input_lengths=(128 512 1024 2048 4096 8192 16384)
output_length=512
concurrency_levels=(1 10 20 30 40 50 60 70 80 90 100 110 120 130 140 150 160 170 180)
requests_multiplier=5  # number of requests = concurrency x 5

# Emit one test case per line in the form:
#   "<input_len> <output_len> <concurrency> <num_prompts>"
# for every (input length, concurrency level) combination. Reads the
# globals input_lengths, output_length, concurrency_levels and
# requests_multiplier; writes to stdout.
generate_test_cases() {
    local in_len level
    for in_len in "${input_lengths[@]}"; do
        for level in "${concurrency_levels[@]}"; do
            printf '%s %s %s %s\n' \
                "$in_len" "$output_length" \
                "$level" "$((level * requests_multiplier))"
        done
    done
}

# Parse command-line options: -m overrides the model directory,
# -p overrides the server port. Diagnostics go to stderr so they do
# not pollute captured stdout.
while getopts "m:p:" opt; do
    case $opt in
        m) model_dir=$OPTARG ;;
        p) port=$OPTARG ;;
        *) echo "Usage: $0 [-m hf_model_dir] [-p port] <output_csv>" >&2
           exit 1
           ;;
    esac
done

# The output CSV file must be supplied as the last positional argument.
if [ "$OPTIND" -gt "$#" ]; then
    echo "Error: Please specify output CSV file as the last argument" >&2
    echo "Usage: $0 [-m hf_model_dir] [-p port] <output_csv>" >&2
    exit 1
fi
output_csv=${!#}

# Relative output paths are placed under RES_DIR_PATH.
if [[ "$output_csv" != /* ]]; then
    output_csv="${RES_DIR_PATH}/${output_csv}"
fi

# Create the result and log directories before anything writes to them.
mkdir -p "$RES_DIR_PATH" "${RES_DIR_PATH}/logs"

# jq is required later to extract metrics from the benchmark JSON output.
check_jq

# Launch the sglang server in the background, capture its output in
# server.log, and remember the PID for the shutdown/cleanup logic below.
echo "Starting sglang server on port $port..."
python3 -m sglang.launch_server \
    --model-path "$model_dir" \
    --host 0.0.0.0 \
    --port "$port" \
    --tensor-parallel-size 8 \
    --enable-hierarchical-cache \
    --hicache-size 40 \
    --mem-fraction-static 0.9 > "${RES_DIR_PATH}/logs/server.log" 2>&1 &
server_pid=$!

# Cleanup handler: stop the background sglang server on ANY script exit.
# Installed BEFORE the readiness loop so that failures during startup
# (set -e, unexpected errors) also shut the server down.
cleanup() {
    echo "Stopping sglang server (PID: $server_pid)..."
    kill $server_pid 2>/dev/null
    wait $server_pid 2>/dev/null
    echo "Server stopped"
}
trap cleanup EXIT

# Wait until the server is listening on $port (poll every 10 seconds).
echo "Waiting for server to start on port $port..."
max_attempts=60  # give up after max_attempts * 10 seconds
attempt=0
# Anchor the port with a trailing delimiter so e.g. port 888 does not
# falsely match ":8888" in the ss output.
while ! ss -tuln | grep -qE ":$port([[:space:]]|$)"; do
    # Fail fast if the server process already died (bad args, OOM, ...).
    if ! kill -0 "$server_pid" 2>/dev/null; then
        echo "Error: server process exited prematurely; see ${RES_DIR_PATH}/logs/server.log"
        exit 1
    fi
    if [ $attempt -ge $max_attempts ]; then
        echo "Error: Server did not start within $((max_attempts * 10)) seconds"
        exit 1
    fi
    attempt=$((attempt + 1))
    sleep 10
done
echo "Server started successfully on port $port"

# Write the CSV header row (truncates any previous output file).
printf '%s\n' 'input_length,output_length,max_concurrency,num_prompts,total_throughput,mean_ttft_ms,mean_itl_ms' > "$output_csv"

# Build the complete list of test cases up front.
test_cases=$(generate_test_cases)

# Count the cases for progress reporting.
current_case=0
total_cases=$(echo "$test_cases" | wc -l)

echo "Total test cases: $total_cases"

while IFS= read -r test_case; do
    [ -z "$test_case" ] && continue  # skip blank lines

    IFS=' ' read -r input_len output_len max_concurrency num_prompts <<< "$test_case"
    current_case=$((current_case + 1))

    echo "[$current_case/$total_cases] Starting test case: input_len=$input_len, output_len=$output_len, max_concurrency=$max_concurrency, num_prompts=$num_prompts"

    # Run each configuration 3 times back to back.
    for ((i=0; i<3; i++)); do
        SYS_TIME=$(date "+%Y%m%d_%H%M%S")
        LOG="run${i}_${input_len}_${output_len}_${max_concurrency}_${num_prompts}_${SYS_TIME}"
        echo "  Run $((i+1))/3: ${RES_DIR_PATH}/logs/${LOG}.log"

        python3 -m sglang.bench_serving \
            --host 0.0.0.0 \
            --port "$port" \
            --backend sglang \
            --model "$model_dir" \
            --tokenizer "$model_dir" \
            --dataset-name random \
            --dataset-path "$datasets_path" \
            --max-concurrency "$max_concurrency" \
            --output-file "${RES_DIR_PATH}/subres-${input_len}-${output_len}-${max_concurrency}-${num_prompts}-${i}.json" \
            --num-prompts "$num_prompts" \
            --random-input-len "$input_len" \
            --random-output-len "$output_len" \
            2>&1 | tee -a "${RES_DIR_PATH}/logs/${LOG}.log"
    done

    # Record only the final run (index 2) in the CSV.
    # NOTE(review): the glob deliberately matches run index 2 only —
    # presumably runs 0 and 1 serve as cache warm-up; confirm that
    # discarding them is intended.
    for result_file in "${RES_DIR_PATH}/subres-${input_len}-${output_len}-${max_concurrency}-${num_prompts}-2"*.json; do
        if [ -f "$result_file" ]; then
            # Extract the key metrics; field order matches the CSV header:
            # input_length,output_length,max_concurrency,num_prompts,
            # total_throughput,mean_ttft_ms,mean_itl_ms
            jq -cr '[
                .random_input_len,
                .random_output_len,
                .max_concurrency,
                .completed,
                (.input_throughput + .output_throughput),
                .mean_ttft_ms,
                .mean_itl_ms
            ] | @csv' "$result_file" >> "$output_csv"
            # Blank separator line between test cases (only when a row
            # was actually written).
            echo "" >> "$output_csv"
        fi
    done

    # Remove this test case's temporary JSON result files.
    rm -f "${RES_DIR_PATH}/subres-${input_len}-${output_len}-${max_concurrency}-${num_prompts}-"*.json

    echo "  Completed test case: input_len=$input_len, output_len=$output_len, max_concurrency=$max_concurrency"

done <<< "$test_cases"

echo "All tests completed. Results saved to $output_csv"