#!/bin/bash
set -euo pipefail

# Interpreter: defaults to the current environment's python; override via PYTHON.
PYTHON_BIN="${PYTHON:-python}"

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${PROJECT_ROOT}"

# Paths and dataset settings; every one can be overridden from the environment.
CONFIG_PATH="${CONFIG_PATH:-${PROJECT_ROOT}/configs/r50_deformable_detr_motip_dancetrack_lwg.yaml}"
CHECKPOINT_PATH="${CHECKPOINT_PATH:-${PROJECT_ROOT}/pretrains/dancetrack_baseline.pth}"
LWG_MODEL_PATH="${LWG_MODEL_PATH:-${PROJECT_ROOT}/pretrains/lwg_dancetrack.pt}"
BUFFER_GATE_MODEL_PATH="${BUFFER_GATE_MODEL_PATH:-${PROJECT_ROOT}/pretrains/buffer_gate_dancetrack.pt}"
DATA_ROOT="${DATA_ROOT:-/home/liuyonghui/datasets}"
OUTPUT_ROOT="${OUTPUT_ROOT:-${PROJECT_ROOT}/outputs/lwg_sweep_dancetrack}"
INFERENCE_SPLIT="${INFERENCE_SPLIT:-val}"

# Threshold lists to sweep (space-separated; override via environment).
LWG_MAIN_LIST="${LWG_MAIN_LIST:-0.16 0.18 0.20 0.22 0.24}"
LWG_BUF_LIST="${LWG_BUF_LIST:-0.08 0.10 0.12 0.14 0.16}"
BUFFER_GATE_LIST="${BUFFER_GATE_LIST:-0.22 0.24 0.26 0.28 0.30}"

# Multi-GPU setup: defaults to 8 cards; trim as needed.
GPU_LIST="${GPU_LIST:-0 1 2 3 4 5 6 7}"

# Environment settings
export NVI_NOTIFY_IGNORE_TASK=1
# Only preload the shim when it actually exists: unconditionally exporting a
# missing .so makes ld.so print a "cannot be preloaded" warning for every
# single process this script spawns.
FAKEINTEL_LIB="/opt/lib/fakeintel/libfakeintel.so"
if [[ -f "${FAKEINTEL_LIB}" ]]; then
  export LD_PRELOAD="${FAKEINTEL_LIB}"
else
  echo "WARN: ${FAKEINTEL_LIB} not found; skipping LD_PRELOAD" >&2
fi

mkdir -p "${OUTPUT_ROOT}"

# Parse GPU_LIST into an array and refuse to continue with zero GPUs.
IFS=' ' read -r -a GPU_ARRAY <<< "${GPU_LIST}"
NUM_GPUS=${#GPU_ARRAY[@]}
if (( NUM_GPUS == 0 )); then
  echo "ERROR: GPU_LIST 为空，请设置可用 GPU，例如 GPU_LIST=\"0 1 2 3\""
  exit 1
fi

# Every (main, buf, gate) threshold combination, one entry per line:
# "<main> <buf> <gate> <tag>" with tag like "main0p16_buf0p08_gate0p22".
declare -a TASKS

#######################################
# Populate the global TASKS array from the threshold lists.
# Globals:
#   LWG_MAIN_LIST, LWG_BUF_LIST, BUFFER_GATE_LIST (read, space-separated)
#   TASKS (appended)
#######################################
build_task_list() {
  local main_raw buf_raw gate_raw
  local main_fmt buf_fmt gate_fmt tag
  for main_raw in ${LWG_MAIN_LIST}; do
    # LC_NUMERIC=C: the formatted values are later interpolated into Python
    # source, so both parsing ("0.16") and output must use a dot decimal
    # separator regardless of the user's locale.
    LC_NUMERIC=C printf -v main_fmt '%.2f' "${main_raw}"
    for buf_raw in ${LWG_BUF_LIST}; do
      LC_NUMERIC=C printf -v buf_fmt '%.2f' "${buf_raw}"
      for gate_raw in ${BUFFER_GATE_LIST}; do
        LC_NUMERIC=C printf -v gate_fmt '%.2f' "${gate_raw}"
        # "." is awkward in directory/tag names; encode 0.16 as 0p16.
        tag="main${main_fmt//./p}_buf${buf_fmt//./p}_gate${gate_fmt//./p}"
        TASKS+=("${main_fmt} ${buf_fmt} ${gate_fmt} ${tag}")
      done
    done
  done
}
build_task_list

TOTAL_TASKS=${#TASKS[@]}
echo "即将运行 ${TOTAL_TASKS} 组阈值组合，使用 ${NUM_GPUS} 张 GPU 并行。"

# Round-robin the task list over the GPU slots: task i goes to slot i % NUM_GPUS.
# Each slot accumulates a newline-terminated batch of task lines.
declare -a TASKS_BY_GPU
for ((slot = 0; slot < NUM_GPUS; ++slot)); do
  TASKS_BY_GPU[slot]=""
done

for task_idx in "${!TASKS[@]}"; do
  # Indexed-array subscripts are arithmetic contexts, so the modulo can live
  # directly in the subscript.
  TASKS_BY_GPU[task_idx % NUM_GPUS]+="${TASKS[task_idx]}"$'\n'
done

#######################################
# Sequentially run every threshold combination assigned to a single GPU.
# Arguments:
#   $1 - GPU id, exported as CUDA_VISIBLE_DEVICES for each run
#   $2 - newline-separated task lines: "<main_th> <buf_th> <gate_th> <tag>"
# Globals (read): PYTHON_BIN, CONFIG_PATH, CHECKPOINT_PATH, LWG_MODEL_PATH,
#   BUFFER_GATE_MODEL_PATH, OUTPUT_ROOT, DATA_ROOT, INFERENCE_SPLIT
# Outputs: progress logs to stdout; result files are written by the
#   compare_lwg module (run_once) under OUTPUT_ROOT.
# Returns: 0 when all runs succeed; under set -e a failing python run aborts
#   the rest of this GPU's batch.
#######################################
run_tasks_on_gpu() {
  local gpu="$1"
  local task_block="$2"

  # "${task_block// }" strips spaces only, so an empty or all-space batch is
  # treated as "no work".
  if [[ -z "${task_block// }" ]]; then
    echo "[GPU ${gpu}] 无任务，跳过。"
    return 0
  fi

  echo "[GPU ${gpu}] 接收到任务："
  echo "${task_block}"

  # One task per line; the trailing newline appended at distribution time
  # guarantees read consumes the final entry. The python heredoc below feeds
  # python's stdin, so it does not steal lines from this read loop (which
  # reads from the here-string on L112).
  while read -r main_th buf_th gate_th tag; do
    [[ -z "${main_th}" ]] && continue
    echo "[GPU ${gpu}] 开始 ${tag} (main=${main_th}, buf=${buf_th}, gate=${gate_th})"
    # Unquoted heredoc delimiter: the shell interpolates the paths and the
    # %.2f-formatted thresholds directly into the Python source text.
    CUDA_VISIBLE_DEVICES="${gpu}" "${PYTHON_BIN}" - <<PY
from pathlib import Path
from compare_lwg import load_config, prepare_common, run_once

config_path = Path("${CONFIG_PATH}")
checkpoint = Path("${CHECKPOINT_PATH}")
lwg_model = Path("${LWG_MODEL_PATH}").resolve()
buffer_model = Path("${BUFFER_GATE_MODEL_PATH}").resolve()
output_root = Path("${OUTPUT_ROOT}").resolve()
data_root = "${DATA_ROOT}"
split = "${INFERENCE_SPLIT}"

cfg = load_config(str(config_path))
base_cfg = prepare_common(cfg, str(checkpoint), output_root, data_root)
base_cfg["INFERENCE_SPLIT"] = split
base_cfg["USE_LWG"] = True
base_cfg["USE_BUFFER_GATE"] = False
base_cfg["LWG_MODEL_PATH"] = str(lwg_model)
base_cfg.setdefault("INFERENCE_GROUP", "default_buffer_gate")

cfg_run = dict(base_cfg)
cfg_run["LWG_MAIN_TH"] = ${main_th}
cfg_run["LWG_BUF_TH"] = ${buf_th}
cfg_run["BUFFER_GATE_THRESH"] = ${gate_th}

print(f"[SWEEP][DanceTrack] tag=${tag} main=${main_th} buf=${buf_th} gate=${gate_th}")
run_once(cfg_run, tag="${tag}", use_lwg=True, lwg_model=str(lwg_model))
PY
    echo "[GPU ${gpu}] 完成 ${tag}"
  done <<< "${task_block}"
}

# Launch one background worker per GPU; each worker drains its own batch.
declare -a worker_pids=()
for slot in "${!GPU_ARRAY[@]}"; do
  run_tasks_on_gpu "${GPU_ARRAY[slot]}" "${TASKS_BY_GPU[slot]}" &
  worker_pids+=("$!")
done

# Reap every worker and remember whether any failed. -e must be disabled
# here, otherwise the first non-zero wait would abort the reap loop before
# the remaining workers are collected.
set +e
fail=0
for worker in "${worker_pids[@]}"; do
  wait "${worker}" || fail=1
done
set -e

if (( fail )); then
  echo "部分扫描任务失败，请检查上方日志。"
  exit 1
fi

echo "DanceTrack 阈值扫描完成，结果保存在：${OUTPUT_ROOT}"