#!/bin/bash
#
# Benchmark driver for an sglang server: launches the server in the
# background, runs sglang.bench_serving for each configured test case,
# and collects per-run metrics into a single CSV file.
#
# Usage: $0 [-m hf_model_dir] [-p port] <output_csv>

set -ex

# Directory that receives result JSON/CSV files and server/benchmark logs.
export RES_DIR_PATH=/data1/jzh/p2pshare/deppseekr1_nohich

# Default parameters (overridable via the -m / -p command-line flags below)
model_dir=/models/DeepSeek-R1
datasets_path=/data2/dataset/ShareGPT_V3_unfiltered_cleaned_split.json
port=8080

# Test cases: each entry is "input_len output_len max_concurrency num_prompts",
# split apart by `read` in the main loop.
test_cases=(
  "512 400 200 500"
)
# Debug cases (uncomment when needed)
# test_cases=(
#   "128 512 5 50"
#   "128 512 10 100"
#   "128 512 20 100"
#   "128 512 40 100"
#   "128 512 60 100"
# )


# ---- Command-line argument parsing ----------------------------------------

# Print the usage line (shared by every error path).
print_usage() {
    echo "Usage: $0 [-m hf_model_dir] [-p port] <output_csv>"
}

# -m overrides the model directory, -p the server port.
while getopts "m:p:" opt; do
    case "$opt" in
        m) model_dir=$OPTARG ;;
        p) port=$OPTARG ;;
        *) print_usage
           exit 1
           ;;
    esac
done

# A positional argument (the output CSV) must follow the parsed options.
if (( OPTIND > $# )); then
    echo "Error: Please specify output CSV file as the last argument"
    print_usage
    exit 1
fi
# The last positional argument names the output CSV file.
output_csv=${!#}

# Relative output paths default to living under RES_DIR_PATH.
case "$output_csv" in
    /*) ;;  # already absolute — leave untouched
    *)  output_csv="${RES_DIR_PATH}/${output_csv}" ;;
esac

# Ensure the result and log directories exist.
mkdir -p "$RES_DIR_PATH"
mkdir -p "${RES_DIR_PATH}/logs"

# Launch the sglang server in the background and remember its PID.
echo "Starting sglang server on port $port..."
python3 -m sglang.launch_server \
    --model-path "$model_dir" \
    --tensor-parallel-size 8 \
    --host 0.0.0.0 \
    --port "$port" \
    --mem-fraction-static 0.9 > "${RES_DIR_PATH}/logs/server.log" 2>&1 &
server_pid=$!

# Kill the server on ANY exit path.  The trap is registered immediately
# after launch (not after the readiness check) so the background server
# cannot leak if the script dies while waiting for startup.
cleanup() {
    echo "Stopping sglang server (PID: $server_pid)..."
    kill "$server_pid" 2>/dev/null
    wait "$server_pid" 2>/dev/null
    echo "Server stopped"
}
trap cleanup EXIT

# Wait for the server to listen on $port (poll every 5 s, bounded).
echo "Waiting for server to start on port $port..."
max_attempts=30  # maximum number of polls
attempt=0
# Anchor the match after the port digits so e.g. port 8080 does not
# false-match an unrelated listener on :80801.
while ! ss -tuln | grep -qE ":$port([^0-9]|\$)"; do
    # Fail fast if the server process already died (bad model path, OOM, ...)
    # instead of waiting out the full timeout.
    if ! kill -0 "$server_pid" 2>/dev/null; then
        echo "Error: Server process exited during startup; see ${RES_DIR_PATH}/logs/server.log"
        exit 1
    fi
    if [ $attempt -ge $max_attempts ]; then
        echo "Error: Server did not start within $((max_attempts * 5)) seconds"
        exit 1  # EXIT trap performs the kill
    fi
    attempt=$((attempt + 1))
    sleep 5  # poll interval
done
echo "Server started successfully on port $port"

# Write the CSV header row; column order must match the per-run jq extraction.
printf '%s\n' 'max_concurrency,random_input_len,random_output_len,total_input_tokens,concurrency,duration,request_throughput,input_throughput,output_throughput,median_ttft_ms,std_ttft_ms,p99_ttft_ms,mean_tpot_ms,median_tpot_ms,std_tpot_ms,p99_tpot_ms,median_itl_ms,std_itl_ms,p99_itl_ms,total_output_tokens,total_throughput,mean_ttft_ms,mean_itl_ms' > "$output_csv"

# Repetitions per test case (override with RUNS_PER_CASE=N; default 3).
runs_per_case=${RUNS_PER_CASE:-3}

# Metrics extracted from each benchmark result JSON, in CSV column order.
# `map(tostring)` keeps missing fields visible as the literal "null",
# matching what `jq -cr '.a, .b, ...'` would have printed per line.
jq_csv_filter='
    [
        .max_concurrency,
        .random_input_len,
        .random_output_len,
        .total_input_tokens,
        .concurrency,
        .duration,
        .request_throughput,
        .input_throughput,
        .output_throughput,
        .median_ttft_ms,
        .std_ttft_ms,
        .p99_ttft_ms,
        .mean_tpot_ms,
        .median_tpot_ms,
        .std_tpot_ms,
        .p99_tpot_ms,
        .median_itl_ms,
        .std_itl_ms,
        .p99_itl_ms,
        .total_output_tokens,
        (.input_throughput + .output_throughput),
        .mean_ttft_ms,
        .mean_itl_ms
    ] | map(tostring) | join(",")
'

# Run every test case against the already-running server.
for test_case in "${test_cases[@]}"; do
    IFS=' ' read -r input_len output_len max_concurrency num_prompts <<< "$test_case"

    echo "Starting test case: max_concurrency=$max_concurrency, input_len=$input_len, output_len=$output_len, num_prompts=$num_prompts"

    # Repeat each case to smooth out run-to-run variance.
    for ((i=0; i<runs_per_case; i++)); do
        SYS_TIME=$(date "+%Y%m%d_%H%M%S")
        LOG="run${i}_${max_concurrency}_${input_len}_${output_len}_${num_prompts}_${SYS_TIME}"
        echo "Log file: ${RES_DIR_PATH}/logs/${LOG}.log"

        python3 -m sglang.bench_serving \
            --host 0.0.0.0 \
            --port "$port" \
            --backend sglang \
            --model "$model_dir" \
            --tokenizer "$model_dir" \
            --dataset-name random \
            --dataset-path "$datasets_path" \
            --max-concurrency "$max_concurrency" \
            --output-file "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-${i}.json" \
            --num-prompts "$num_prompts" \
            --random-input-len "$input_len" \
            --random-output-len "$output_len" \
            2>&1 | tee -a "${RES_DIR_PATH}/logs/${LOG}.log"
        # The trailing `tee` masks the benchmark's exit status (no pipefail
        # is set), so surface failures explicitly instead of silently
        # producing an empty/missing result file.
        if [ "${PIPESTATUS[0]}" -ne 0 ]; then
            echo "Warning: bench_serving run $i failed for case '$test_case'; see ${RES_DIR_PATH}/logs/${LOG}.log" >&2
        fi
    done

    # Append one CSV row per result file of the current case.
    for result_file in "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-"*.json; do
        [ -f "$result_file" ] || continue
        # Warn (rather than abort under set -e) on a malformed result file;
        # the previous jq|tr|sed pipeline silently swallowed jq failures.
        if ! jq -r "$jq_csv_filter" "$result_file" >> "$output_csv"; then
            echo "Warning: failed to extract metrics from $result_file" >&2
        fi
    done

    # Remove the per-case temporary result files.
    rm -f "${RES_DIR_PATH}/subres-${max_concurrency}-${input_len}-${output_len}-${num_prompts}-"*.json
done

echo "All tests completed. Results saved to $output_csv"