#!/usr/bin/env bash
set -euxo pipefail

# GPU-optimized training launcher.
# Pins the job to one GPU and sets thread/FFT knobs for performance.

# Positional arguments (all optional):
#   $1  dataset name                 (default: SINPA)
#   $2  GPU index for CUDA_VISIBLE_DEVICES (default: 0)
#   $3  run mode passed to main.py   (default: train)
#   $4  Weights & Biases mode        (default: offline)
DATASET=${1:-SINPA}
GPU=${2:-0}
MODE=${3:-train}
WANDB_MODE=${4:-offline}

# GPU-related environment tuning.
export CUDA_VISIBLE_DEVICES=$GPU
export FFT_FORCE_CPU=0  # force FFT onto the GPU (0 = no CPU fallback)
# Keep BLAS/OpenMP single-threaded so CPU threads don't contend with the
# GPU data-loading workers.
export OMP_NUM_THREADS=1
export MKL_NUM_THREADS=1

# Activate the project's virtual environment when one exists (best effort;
# never fail the run just because activation did not work).
if [[ -d .venv ]]; then
  # shellcheck disable=SC1091
  . .venv/bin/activate || true
fi

# Report GPU status. Guard the nvidia-smi call: under `set -e` a missing
# binary (CPU-only host, driverless container) would otherwise abort the
# whole script before the PyTorch CUDA check below could report anything.
echo "=== GPU Status ==="
if command -v nvidia-smi >/dev/null 2>&1; then
  nvidia-smi --query-gpu=name,memory.total,memory.used,utilization.gpu \
    --format=csv,noheader,nounits || echo "WARNING: nvidia-smi query failed" >&2
else
  echo "WARNING: nvidia-smi not found; skipping GPU status report" >&2
fi

# Verify the installed PyTorch build and whether it can see CUDA.
# Intentionally NOT guarded: if torch itself cannot be imported, the
# training run below cannot work either, so aborting here is correct.
python -c "
import torch
print(f'PyTorch version: {torch.__version__}')
print(f'CUDA available: {torch.cuda.is_available()}')
if torch.cuda.is_available():
    print(f'CUDA version: {torch.version.cuda}')
    print(f'GPU count: {torch.cuda.device_count()}')
    print(f'Current GPU: {torch.cuda.current_device()}')
    print(f'GPU name: {torch.cuda.get_device_name(0)}')
"

# Dataset directory. Default is the hy-tmp mount; callers may override by
# exporting DATAPATH before invoking the script (backward-compatible).
DATAPATH="${DATAPATH:-/hy-tmp}"

# Verify dataset files; convert .npy pairs to .npz when .npz are missing.
# The path is passed to Python via argv instead of being interpolated into
# the Python source text, so unusual characters in DATAPATH cannot break
# (or inject into) the embedded program.
echo "=== Data Check ==="
if [ -f "$DATAPATH/train.npz" ] && [ -f "$DATAPATH/val.npz" ] && [ -f "$DATAPATH/test.npz" ]; then
    echo "Found .npz files in $DATAPATH"
else
    echo "Converting .npy to .npz format..."
    python - "$DATAPATH" <<'PY'
import os
import sys
import numpy as np

datapath = sys.argv[1]
missing = []
for split in ('train', 'val', 'test'):
    x_file = os.path.join(datapath, f'{split}_x.npy')
    y_file = os.path.join(datapath, f'{split}_y.npy')
    npz_file = os.path.join(datapath, f'{split}.npz')
    if os.path.exists(x_file) and os.path.exists(y_file):
        print(f'Converting {split}...')
        x = np.load(x_file)
        y = np.load(y_file)
        np.savez(npz_file, x=x, y=y)
        print(f'Saved {npz_file} with shapes: x={x.shape}, y={y.shape}')
    else:
        missing.append(split)
if missing:
    # Non-fatal (matches previous behavior) but no longer silent: the
    # original script reported nothing when the data was absent entirely.
    print(f'WARNING: no .npy pair found for splits: {missing}', file=sys.stderr)
PY
fi

# Training hyperparameters chosen for GPU throughput.
BASE_LR=1e-3
LR_DECAY_RATIO=0.5
BATCH_SIZE=8
OPTIMIZER=Adam
BLOCKS=2

# Announce the run configuration before launching.
printf '%s\n' "=== Starting Training ==="
printf '%s\n' "Dataset: $DATASET"
printf '%s\n' "GPU: $GPU"
printf '%s\n' "Data path: $DATAPATH"
printf '%s\n' "Batch size: $BATCH_SIZE"

# Launch training with GPU-oriented data-loading flags (pinned memory,
# prefetching, persistent workers, AMP). Arguments are collected in an
# array so each flag stays quoted and readable.
train_args=(
  --dataset "$DATASET" --mode "$MODE" --gpu "$GPU"
  --batch_size "$BATCH_SIZE" --base_lr "$BASE_LR"
  --lr_decay_ratio "$LR_DECAY_RATIO" --optimizer "$OPTIMIZER"
  --n_hidden 64 --n_blocks "$BLOCKS" --wandb true --wandb_mode "$WANDB_MODE"
  --datapath "$DATAPATH"
  --num_workers 2
  --pin_memory
  --prefetch_factor 2
  --persistent_workers
  --amp
  --max_epochs 10
)
python ./experiments/DeepPA/main.py "${train_args[@]}"

echo "=== Training Completed ==="