#!/bin/bash
# SLURM batch script: one grid-search worker for contrastive ECG/CMR pretraining.
# Submit as: sbatch <script> <local_loss_weight> <local_loss_std> <temp_local_loss>
#SBATCH --job-name=ml
#SBATCH --partition=universe
# NOTE(review): SLURM does not create the slurm/ directory — if it is missing in
# the submit dir, stdout/stderr are silently dropped. Confirm it exists before submitting.
#SBATCH --output=slurm/result_%j.txt
#SBATCH --error=slurm/error_%j.txt
#SBATCH --mem=70G
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --gres=gpu:1
# NOTE(review): --time uses HH:MM:SS, so this requests 10 *seconds* of walltime —
# far too short for MAX_EPOCHS=120 below; presumably 10:00:00 (ten hours) was
# intended. Confirm and fix before launching real runs.
#SBATCH --time=00:00:10

# Load required modules and activate the conda environment.
module load python/anaconda3
source activate mmcl

# Enter the repository root; abort if it is missing so we do not keep running
# from the wrong working directory (and export a bogus PYTHONPATH).
cd ECG-CMR-TIME-V2 || { echo "ERROR: cannot cd to ECG-CMR-TIME-V2" >&2; exit 1; }
export PYTHONPATH="$(pwd)"

# Log job context so runs can be debugged post-hoc from the SLURM output file.
echo "Start time: $(date)"
echo "SLURM_JOBID: $SLURM_JOBID"
echo "SLURM_JOB_NODELIST: $SLURM_JOB_NODELIST"
echo "SLURM_JOB_PARTITION: $SLURM_JOB_PARTITION"
echo "SLURM_NNODES: $SLURM_NNODES"
echo "SLURM_GPUS_ON_NODE: $SLURM_GPUS_ON_NODE"
echo "SLURM_SUBMIT_DIR: $SLURM_SUBMIT_DIR"

# Hyperparameters under grid search, passed as command-line arguments:
#   $1 - weight of the local contrastive loss (training_mode.loss.weight_local)
#   $2 - std of the local loss (training_mode.loss.local_loss.std)
#   $3 - temperature of the local loss (training_mode.loss.local_loss.temperature)
# Warn loudly when they are missing: the job would otherwise proceed with empty
# Hydra overrides and fail later with a much less obvious error.
if [ "$#" -lt 3 ]; then
    echo "WARNING: expected 3 arguments (LOCAL_LOSS_WEIGHT LOCAL_LOSS_STD TEMP_LOCAL_LOSS), got $#" >&2
fi
LOCAL_LOSS_WEIGHT=$1
LOCAL_LOSS_STD=$2
TEMP_LOCAL_LOSS=$3

# Fixed parameters: Hydra config group selections derived from the run type.
DATA_TYPE="multimodal"
PRETRAINING_TYPE="contrastive"
DATASET_CONFIG="${DATA_TYPE}/${PRETRAINING_TYPE}"
MODELS_CONFIG="${DATA_TYPE}/${PRETRAINING_TYPE}"
TRAINING_MODE_CONFIG="${DATA_TYPE}/pretrain_${PRETRAINING_TYPE}"
AUGMENTATIONS_TYPE="heavy"

# Pretrained encoder checkpoints used to initialize both towers.
MRI_CHECKPOINT_MODEL='experiments/2024-12-23/11-02-21/best_val_loss_hf_model'
ECG_CHECKPOINT_MODEL='experiments/2024-11-05/12-15-52/best_val_loss_hf_model'

# Other fixed parameters
GPUS_PER_NODE=1

# Fine-tune both encoders, keeping their first two layers frozen.
TRAIN_ECG=true
FREEZE_FIRST_N_LAYERS_ECG=2

TRAIN_IMAGE=true
FREEZE_FIRST_N_LAYERS_IMAGE=2

MAX_EPOCHS=120
WARMUP_EPOCHS=2

BATCH_SIZE=128
MIN_LR_FACTOR=0.1

# Input sizes: number of MRI frames and ECG samples fed to the encoders.
IMAGE_FRAMES=26
ECG_TIME_STEPS=2500

PROJECTION_HIDDEN_DIM=256
PROJECTION_DIM=128

WITH_EVAL=true

TEMP_GLOBAL_LOSS=0.07

LOCAL_LOSS_TYPE="local_out"
CROP_METHOD="rpeak_to_rpeak"

# Learning rates: separate rates for the projection head vs. each encoder.
LR=3e-4
IMAGE_LR=1e-4
ECG_LR=1e-4

WEIGHT_DECAY=1e-7
IMAGE_WEIGHT_DECAY=1e-7
ECG_WEIGHT_DECAY=1e-7

# Token pooling for global and local contrastive objectives.
IMAGE_REDUCTION_GLOBAL="mean"
ECG_REDUCTION_GLOBAL="mean"
IMAGE_REDUCTION_LOCAL="mean"
ECG_REDUCTION_LOCAL="mean"

# Downstream regression evaluation (linear probe) run during pretraining.
REGRESSION_TASK="vol"
REGRESSION_BATCH=256
REGRESSION_INTERVAL=1
REGRESSION_LR=3e-3
REGRESSION_WEIGHT_DECAY=1e-6
REINIT_LINEAR_LAYER_EVERY_EPOCH=true
WARMUP_COSINE_USE=false
USE_MLP=false

TRAINING_SCHEME='linear_probing'

# Experiment tag: embed the searched weight, dots replaced by underscores so
# the name stays filesystem/Hydra friendly.
ADDITIONAL_EXPERIMENT_NAME="GRID_SEARCH_FOR_BEST_RETRIEVAL_LOCAL_WEIGHT_${LOCAL_LOSS_WEIGHT//./_}"

# Launch the pretraining run under srun, passing all settings as Hydra
# command-line overrides. Every expansion is quoted (ShellCheck SC2086):
# values with spaces or glob characters must reach run.py as single arguments.
srun python3 run.py \
    models="$MODELS_CONFIG" \
    training_mode="$TRAINING_MODE_CONFIG" \
    dataset="$DATASET_CONFIG" \
    augmentations="$AUGMENTATIONS_TYPE" \
    n_gpus="$GPUS_PER_NODE" \
    dataset.num_workers=4 \
    dataset.batch_size="$BATCH_SIZE" \
    max_epochs="$MAX_EPOCHS" \
    augmentations.imaging.time_sample.result_n_frames="$IMAGE_FRAMES" \
    augmentations.ecg.random_crop.ecg_time_steps="$ECG_TIME_STEPS" \
    models.projection.hidden_dim="$PROJECTION_HIDDEN_DIM" \
    models.projection.d_contrastive="$PROJECTION_DIM" \
    models.params.lr="$LR" \
    models.params.lr_image_encoder="$IMAGE_LR" \
    models.params.lr_ecg_encoder="$ECG_LR" \
    models.params.weight_decay="$WEIGHT_DECAY" \
    models.params.weight_decay_image_encoder="$IMAGE_WEIGHT_DECAY" \
    models.params.weight_decay_ecg_encoder="$ECG_WEIGHT_DECAY" \
    models.params.scheduler.warmup_cosine.min_lr_factor="$MIN_LR_FACTOR" \
    models.params.scheduler.warmup_cosine.warmup_steps="$WARMUP_EPOCHS" \
    models.params.scheduler.warmup_cosine.total_steps="$MAX_EPOCHS" \
    training_mode.loss.type="both" \
    training_mode.loss.weight_local="$LOCAL_LOSS_WEIGHT" \
    training_mode.loss.global_loss.temperature="$TEMP_GLOBAL_LOSS" \
    training_mode.loss.local_loss.temperature="$TEMP_LOCAL_LOSS" \
    training_mode.loss.local_loss.type="$LOCAL_LOSS_TYPE" \
    training_mode.loss.local_loss.std="$LOCAL_LOSS_STD" \
    training_mode.reduction.image.global_token="$IMAGE_REDUCTION_GLOBAL" \
    training_mode.reduction.ecg.global_token="$ECG_REDUCTION_GLOBAL" \
    training_mode.reduction.image.all_tokens="$IMAGE_REDUCTION_LOCAL" \
    training_mode.reduction.ecg.all_tokens="$ECG_REDUCTION_LOCAL" \
    training_mode.reduction.ecg.crop_method="$CROP_METHOD" \
    training_mode.encoders.image.train="$TRAIN_IMAGE" \
    training_mode.encoders.image.freeze_first_n_layers="$FREEZE_FIRST_N_LAYERS_IMAGE" \
    training_mode.encoders.image.checkpoint_path="$MRI_CHECKPOINT_MODEL" \
    training_mode.encoders.ecg.train="$TRAIN_ECG" \
    training_mode.encoders.ecg.freeze_first_n_layers="$FREEZE_FIRST_N_LAYERS_ECG" \
    training_mode.encoders.ecg.checkpoint_path="$ECG_CHECKPOINT_MODEL" \
    training_mode.with_eval="$WITH_EVAL" \
    downstream_task.type="$REGRESSION_TASK" \
    downstream_task.batch_size="$REGRESSION_BATCH" \
    downstream_task.epoch_interval="$REGRESSION_INTERVAL" \
    downstream_task.params.lr="$REGRESSION_LR" \
    downstream_task.params.weight_decay="$REGRESSION_WEIGHT_DECAY" \
    downstream_task.training_scheme="$TRAINING_SCHEME" \
    downstream_task.use_mlp="$USE_MLP" \
    downstream_task.params.scheduler.warmup_cosine.use="$WARMUP_COSINE_USE" \
    downstream_task.augmentations.imaging.time_sample.result_n_frames="$IMAGE_FRAMES" \
    downstream_task.reinitialize_every_epoch="$REINIT_LINEAR_LAYER_EVERY_EPOCH" \
    experiment_name_addition="$ADDITIONAL_EXPERIMENT_NAME"
