#!/bin/bash

# ====================================
# Usage:   bash run_dist.sh <num_gpus> <config.yaml>
# Example: bash run_dist.sh 4 /path/to/config.yaml
# ====================================

# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -euo pipefail

if [ $# -ne 2 ]; then
    echo "❌ 用法: bash $0 <num_gpus> <config.yaml>" >&2
    exit 1
fi

NUM_GPUS=$1
CONFIG=$2

# <num_gpus> must be a positive integer; anything else would silently break
# the head/nproc_per_node arithmetic below.
if ! [[ "$NUM_GPUS" =~ ^[1-9][0-9]*$ ]]; then
    echo "❌ num_gpus 必须是正整数: $NUM_GPUS" >&2
    exit 1
fi

# Fail early with a clear message instead of letting the python side crash.
if [ ! -f "$CONFIG" ]; then
    echo "❌ 配置文件不存在: $CONFIG" >&2
    exit 1
fi

# ====== Step 1: pick GPU ids sorted by used memory, ascending ======
# nvidia-smi prints one "used MiB" value per GPU, one per line; NR-1 turns
# the row number into the zero-based GPU index before sorting by usage.
# -k2,2n restricts the numeric sort key to the memory field only.
GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits | \
    awk '{print NR-1, $1}' | sort -k2,2n | head -n "$NUM_GPUS" | cut -d' ' -f1 | paste -sd "," -)

# Count the comma-separated ids; fewer than requested means the machine
# simply does not have that many GPUs.
FOUND=$(awk -F',' '{print NF}' <<< "$GPU_IDS")
if [ "$FOUND" -lt "$NUM_GPUS" ]; then
    echo "❌ 找到的可用 GPU 数量不足 $NUM_GPUS 张，仅有 $FOUND 张: $GPU_IDS" >&2
    exit 1
fi

echo "✅ 使用 GPU（显存占用最小前 $NUM_GPUS 张）: $GPU_IDS"

# ====== Step 2: pick an unused master port automatically ======

#######################################
# Print a TCP port in [29000, 29999] that lsof reports as unused.
# Bounded retry so a fully-busy range (or a broken lsof) cannot make the
# script spin forever; after the cap, the last candidate is printed anyway.
# Outputs: the chosen port number on stdout
# Returns: 0 always
#######################################
get_free_port() {
    local port attempt
    for (( attempt = 0; attempt < 100; attempt++ )); do
        port=$(( RANDOM % 1000 + 29000 ))
        # lsof succeeds when something listens on the port; a free port
        # makes it fail, which is what we want.
        if ! lsof -i:"$port" &>/dev/null; then
            echo "$port"
            return 0
        fi
    done
    # Every probe looked busy; fall back to the last candidate rather
    # than hanging the launcher.
    echo "$port"
    return 0
}

MASTER_PORT=$(get_free_port)
echo "✅ 使用 master_port: $MASTER_PORT"

# ====== Step 3: launch the distributed test script ======
# CUDA_VISIBLE_DEVICES restricts torch to the ids selected in Step 1.
# exec replaces this wrapper shell with the launcher so signals (Ctrl-C,
# SIGTERM from schedulers) reach the python process directly.
export CUDA_VISIBLE_DEVICES="$GPU_IDS"
exec python -m torch.distributed.run \
    --nproc_per_node="$NUM_GPUS" \
    --master_port="$MASTER_PORT" \
    test_custom_ddp.py \
    "$CONFIG"
