#!/usr/bin/env bash
# Strict mode: -e exit on error, -u error on unset vars, -x trace every
# command (verbose logs), -o pipefail fail if any pipeline stage fails.
set -euxo pipefail

# Usage:
#   bash scripts/train.sh [DATASET=SINPA] [GPU=0] [MODE=train] [WANDB_MODE=offline]
# Notes:
#   - Matches paper: Adam, batch_size=8, base_lr=1e-3, halved every 3 epochs.
#   - The LR-decay step boundaries are computed in code as every 3 epochs (see experiments/DeepPA/main.py).
#   - Set RUN_GRID=true to run grid search over C in {8,16,32,64,128}.

# Positional arguments with paper defaults (see usage header above).
DATASET=${1:-SINPA}      # dataset directory name under ./data/
GPU=${2:-0}              # GPU index passed through to main.py
MODE=${3:-train}         # run mode passed through to main.py
WANDB_MODE=${4:-offline} # wandb logging mode
RUN_GRID=${RUN_GRID:-false}

# Activate the project virtualenv when present; activation failures are
# non-fatal (|| true) so the script can still run with a system Python.
if [[ -d .venv ]]; then
  . .venv/bin/activate || true
fi

# Optional: auto-download dataset if not found (requires huggingface_hub).
# Only the SINPA dataset has a known HuggingFace mirror; for any other
# dataset name we fail fast instead of silently downloading SINPA into
# ./data/SINPA and then training without the requested data.
if [ ! -d "./data/$DATASET" ]; then
  if [ "$DATASET" != "SINPA" ]; then
    echo "Dataset ./data/$DATASET not found and no auto-download is available for '$DATASET'." >&2
    exit 1
  fi
  echo "Dataset ./data/$DATASET not found. Attempting HuggingFace download..."
  python - <<'PY'
from huggingface_hub import snapshot_download
snapshot_download(repo_id="Huaiwu/SINPA", repo_type="dataset", local_dir="./data/SINPA")
print("Downloaded dataset to ./data/SINPA")
PY
fi

# Paper settings (constants; readonly guards against accidental reassignment).
readonly BASE_LR=1e-3
readonly LR_DECAY_RATIO=0.5
readonly BATCH_SIZE=8
readonly OPTIMIZER=Adam
readonly BLOCKS=2

# Single run (C=64) — best according to paper.
# Skipped when RUN_GRID=true: the grid below already includes C=64, so
# running it here as well would duplicate a full (expensive) training run.
if [ "$RUN_GRID" != "true" ]; then
  python ./experiments/DeepPA/main.py \
    --dataset "$DATASET" --mode "$MODE" --gpu "$GPU" \
    --batch_size "$BATCH_SIZE" --base_lr "$BASE_LR" \
    --lr_decay_ratio "$LR_DECAY_RATIO" --optimizer "$OPTIMIZER" \
    --n_hidden 64 --n_blocks "$BLOCKS" --wandb true --wandb_mode "$WANDB_MODE"
fi

# Optional grid search over the hidden-channel width C (paper's sweep).
# Each run is tagged via --n_exp with its C value for later identification.
if [[ "$RUN_GRID" == "true" ]]; then
  for hidden in 8 16 32 64 128; do
    python ./experiments/DeepPA/main.py \
      --dataset "$DATASET" --mode "$MODE" --gpu "$GPU" \
      --batch_size "$BATCH_SIZE" --base_lr "$BASE_LR" \
      --lr_decay_ratio "$LR_DECAY_RATIO" --optimizer "$OPTIMIZER" \
      --n_hidden "$hidden" --n_blocks "$BLOCKS" --wandb true --wandb_mode "$WANDB_MODE" \
      --n_exp "$hidden"
  done
fi

# Tips:
# - To run in background: nohup bash scripts/train.sh SINPA 0 train offline > train.log 2>&1 &
# - To resume venv later: source .venv/bin/activate