#!/bin/bash
# SLURM batch script: launches pretraining for ECG, imaging, or multimodal
# models depending on DATA_TYPE / PRETRAINING_TYPE set further below.
# %j in the output/error paths expands to the SLURM job ID.
#SBATCH --job-name=ml
#SBATCH --partition=universe
#SBATCH --output=slurm/result_%j.txt
#SBATCH --error=slurm/error_%j.txt
#SBATCH --mem=70G
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=8
#SBATCH --gres=gpu:1
#SBATCH --time=00:10:00

echo "start time: $(date)"
# Log the SLURM context so the result file records where/how the job ran.
echo "SLURM_JOBID=$SLURM_JOBID"
echo "SLURM_JOB_NODELIST=$SLURM_JOB_NODELIST"
echo "SLURM_JOB_PARTITION=$SLURM_JOB_PARTITION"
echo "SLURM_NNODES=$SLURM_NNODES"
echo "SLURM_GPUS_ON_NODE=$SLURM_GPUS_ON_NODE"
echo "SLURM_SUBMIT_DIR=$SLURM_SUBMIT_DIR"

module load python/anaconda3
source activate mmcl

# BUGFIX: guard the cd — if the checkout is missing, the original script kept
# running from the submit directory with a wrong PYTHONPATH.
cd ECG-CMR-TIME-V2 || { echo "ERROR: cannot cd to ECG-CMR-TIME-V2" >&2; exit 1; }
export PYTHONPATH="$(pwd)"

# Top-level experiment selectors; the rest of the script branches on these.
DATA_TYPE="multimodal" # 'ecg', 'imaging', 'multimodal'
PRETRAINING_TYPE="contrastive" # 'contrastive', 'masked', 'supervised'

# CONFIGS
# Config group paths handed to run.py (presumably Hydra-style config groups —
# the "group=name" / "a.b.c=value" override syntax below suggests it; confirm
# against run.py).
DATASET_CONFIG="${DATA_TYPE}/${PRETRAINING_TYPE}"
MODELS_CONFIG="${DATA_TYPE}/${PRETRAINING_TYPE}"
TRAINING_MODE_CONFIG="${DATA_TYPE}/pretrain_${PRETRAINING_TYPE}"


if [ "$DATA_TYPE" = "imaging" ]; then
    echo "IMAGING TRAINING MODE: $PRETRAINING_TYPE"

    IMAGE_FRAMES=26
    TASK="img_pretrain_${PRETRAINING_TYPE}"
    AUGMENTATIONS_TYPE="without"  # Options: 'light', 'medium', 'heavy', 'without'
    GPUS_PER_NODE=1
    NUM_WORKERS=8

    # Defaults for values referenced in the shared masked/supervised command
    # suffix below.
    # BUGFIX: previously RESNET_MODEL_DEPTH was set only in the 'supervised'
    # branch, and REGRESSION_INTERVAL / DOWNSTREAM_WARMUP_COSINE_USE only in
    # the 'masked' branch, so the other run expanded them to empty strings and
    # produced malformed "key=" overrides.
    RESNET_MODEL_DEPTH=50
    REGRESSION_INTERVAL=50
    DOWNSTREAM_WARMUP_COSINE_USE=false

    if [ "$PRETRAINING_TYPE" = "contrastive" ]; then
        BACKBONE='vit'  # Options: 'resnet', 'vit'
        MODEL_DEPTH=50
        BATCH_SIZE=128
        WITH_EVAL=false
        AUGMENTATIONS_TYPE="heavy"  # contrastive pretraining overrides the default

        # Projection head for the contrastive embedding space.
        PROJECTION_HIDDEN_DIM=256
        PROJECTION_DIM=128
        PROJECTION_NUM_LAYERS=2

        LR=3e-4
        WEIGHT_DECAY=1e-6
        WARMUP_STEPS=10
        MAX_EPOCHS=300

    elif [ "$PRETRAINING_TYPE" = "masked" ]; then
        BACKBONE='vit'
        MODEL_SIZE='base'
        BATCH_SIZE=256
        WITH_EVAL=true

        # ViT input geometry and transformer size.
        IMAGE_SIZE=84
        PATCH_SIZE=12
        TUBELET_SIZE=10
        HIDDEN_SIZE=512 # 384, 512, 768
        INTERMEDIATE_SIZE=2048 # 1536, 2048, 3072
        NUM_HIDDEN_LAYERS=4
        NUM_ATTENTION_HEADS=8
        DROP_PATH_RATE=0.0
        HIDDEN_DROPOUT=0.1
        ATTENTION_DROPOUT=0.1

        MASK_RATIO=0.9
        MASK_LOSS=true

        LR=3e-4
        WEIGHT_DECAY=1e-6
        WARMUP_STEPS=20
        MAX_EPOCHS=1500
        DOWNSTREAM_WARMUP_COSINE_USE=false
        REGRESSION_INTERVAL=50

    elif [ "$PRETRAINING_TYPE" = "supervised" ]; then
        BACKBONE='vit'
        MODEL_SIZE='base'
        BATCH_SIZE=256
        WITH_EVAL=false

        # ViT input geometry and transformer size.
        IMAGE_SIZE=84
        PATCH_SIZE=12
        TUBELET_SIZE=2
        HIDDEN_SIZE=512 # 384, 512, 768
        INTERMEDIATE_SIZE=2048 # 1536, 2048, 3072
        NUM_HIDDEN_LAYERS=4
        NUM_ATTENTION_HEADS=8
        DROP_PATH_RATE=0.0
        HIDDEN_DROPOUT=0.1
        ATTENTION_DROPOUT=0.1

        RESNET_MODEL_DEPTH=50

        # Supervised-specific parameters
        MASK_RATIO=0.0
        MASK_LOSS=false

        LR=3e-4
        WEIGHT_DECAY=1e-6
        WARMUP_STEPS=10
        MAX_EPOCHS=300

    fi

    # The override list is built as one string and intentionally expanded
    # unquoted at the srun call so each "key=value" becomes its own argument.
    # No value may therefore contain whitespace.
    CMD="python3 run.py \
        models=$MODELS_CONFIG \
        training_mode=$TRAINING_MODE_CONFIG \
        dataset=$DATASET_CONFIG \
        augmentations=$AUGMENTATIONS_TYPE \
        n_gpus=$GPUS_PER_NODE \
        dataset.num_workers=$NUM_WORKERS \
        dataset.batch_size=$BATCH_SIZE \
        max_epochs=$MAX_EPOCHS \
        models.params.scheduler.warmup_cosine.warmup_steps=$WARMUP_STEPS \
        models.params.scheduler.warmup_cosine.total_steps=$MAX_EPOCHS \
        models.params.scheduler.warmup_cosine.min_lr_factor=0.1 \
        augmentations.imaging.time_sample.result_n_frames=$IMAGE_FRAMES \
        training_mode.task=$TASK \
        training_mode.with_eval=$WITH_EVAL \
        models.params.lr=$LR \
        models.params.weight_decay=$WEIGHT_DECAY \
        downstream_task.augmentations.imaging.time_sample.result_n_frames=$IMAGE_FRAMES"

    if [ "$PRETRAINING_TYPE" = "contrastive" ]; then
        CMD="$CMD \
        models.type=image_contrastive \
        models.backbone=$BACKBONE \
        models.resnet.model_depth=$MODEL_DEPTH \
        models.projection.hidden_dim=$PROJECTION_HIDDEN_DIM \
        models.projection.proj_out_dim=$PROJECTION_DIM \
        models.projection.num_layers=$PROJECTION_NUM_LAYERS"

    elif [ "$PRETRAINING_TYPE" = "masked" ] || [ "$PRETRAINING_TYPE" = "supervised" ]; then
        CMD="$CMD \
        models.type=image_${PRETRAINING_TYPE} \
        models.backbone=$BACKBONE \
        models.model_size=$MODEL_SIZE \
        models.base.image_size=$IMAGE_SIZE \
        models.base.patch_size=$PATCH_SIZE \
        models.base.tubelet_size=$TUBELET_SIZE \
        models.base.hidden_size=$HIDDEN_SIZE \
        models.base.num_hidden_layers=$NUM_HIDDEN_LAYERS \
        models.base.num_attention_heads=$NUM_ATTENTION_HEADS \
        models.base.intermediate_size=$INTERMEDIATE_SIZE \
        models.base.drop_path_rate=$DROP_PATH_RATE \
        models.base.hidden_dropout_prob=$HIDDEN_DROPOUT \
        models.base.attention_probs_dropout_prob=$ATTENTION_DROPOUT \
        models.base.mask_ratio=$MASK_RATIO \
        models.base.mask_loss=$MASK_LOSS \
        models.resnet.model_depth=$RESNET_MODEL_DEPTH \
        downstream_task.epoch_interval=$REGRESSION_INTERVAL \
        downstream_task.params.scheduler.warmup_cosine.use=$DOWNSTREAM_WARMUP_COSINE_USE"
    fi

    echo "Running command: $CMD"
    # Disable pathname expansion while $CMD is word-split, so no override
    # value can accidentally glob against files in the working directory.
    set -f
    srun $CMD
    set +f

elif [ "$DATA_TYPE" = "ecg" ]; then
    echo "ECG TRAINING MODE: $PRETRAINING_TYPE"

    MODE="pretrain"  # 'pretrain' or 'finetune' (finetune loads the checkpoint below)
    MAX_EPOCHS=300
    NUM_WORKERS=4
    BATCH_SIZE=256
    NUM_HIDDEN_LAYERS=6
    NUM_ATTENTION_HEADS=8
    PATCH_SIZE="1,20"  # rendered as patch_size=[1,20] in the override below
    HIDDEN_SIZE=512 # 384, 512, 768
    INTERMEDIATE_SIZE=2048 # 1536, 2048, 3072
    TASK="ecg_${MODE}_${PRETRAINING_TYPE}"
    AUGMENTATIONS_TYPE="without" # 'light', 'medium', 'heavy', 'without'
    WITH_EVAL=false
    GPUS_PER_NODE=1

    DROP_PATH_RATE=0.0
    HIDDEN_DROPOUT=0.1
    ATTENTION_DROPOUT=0.1

    LR=3e-4
    WEIGHT_DECAY=1e-6
    WARMUP_STEPS=20
    WARMUP_COSINE_USE=false

    if [ "$MODE" = "finetune" ]; then
        CHECKPOINT_ECG_PATH="experiments/2024-10-18/14-36-24/best_val_loss_hf_model"
        USE_MLP=true
        TRAINING_SCHEME="fine_tune"
    else
        CHECKPOINT_ECG_PATH=None
        USE_MLP=false
        TRAINING_SCHEME='linear_probing'
    fi

    # NOTE: in the patch_size line the inner quotes close and reopen the outer
    # double quotes, so CMD ends up containing the literal text
    # models.base.patch_size=[1,20].
    CMD="python3 run.py \
        models=$MODELS_CONFIG \
        training_mode=$TRAINING_MODE_CONFIG \
        dataset=$DATASET_CONFIG \
        augmentations=$AUGMENTATIONS_TYPE \
        n_gpus=$GPUS_PER_NODE \
        dataset.num_workers=$NUM_WORKERS \
        dataset.batch_size=$BATCH_SIZE \
        max_epochs=$MAX_EPOCHS \
        models.params.scheduler.warmup_cosine.warmup_steps=$WARMUP_STEPS \
        models.params.scheduler.warmup_cosine.total_steps=$MAX_EPOCHS \
        models.base.patch_size="[$PATCH_SIZE]" \
        models.base.num_hidden_layers=$NUM_HIDDEN_LAYERS \
        models.base.hidden_size=$HIDDEN_SIZE \
        models.base.intermediate_size=$INTERMEDIATE_SIZE \
        models.base.drop_path_rate=$DROP_PATH_RATE \
        models.base.hidden_dropout_prob=$HIDDEN_DROPOUT \
        models.base.attention_probs_dropout_prob=$ATTENTION_DROPOUT \
        models.base.num_attention_heads=$NUM_ATTENTION_HEADS \
        training_mode.task=$TASK \
        training_mode.with_eval=$WITH_EVAL \
        training_mode.checkpoint_path=$CHECKPOINT_ECG_PATH \
        models.params.lr=$LR \
        models.params.weight_decay=$WEIGHT_DECAY \
        downstream_task.training_scheme=$TRAINING_SCHEME \
        downstream_task.use_mlp=$USE_MLP \
        downstream_task.params.scheduler.warmup_cosine.use=$WARMUP_COSINE_USE"

    echo "Running command: $CMD"
    # BUGFIX: disable pathname expansion while $CMD is word-split — "[1,20]"
    # is a glob (character class) and would silently be replaced by a file
    # named "1", ",", "2" or "0" if one existed in the working directory.
    set -f
    srun $CMD
    set +f

elif [ "$DATA_TYPE" = "multimodal" ]; then
    echo "MULTIMODAL TRAINING MODE: $PRETRAINING_TYPE"

    # Pretrained unimodal encoder checkpoints used to initialize each branch.
    MRI_CHECKPOINT_MODEL='experiments/2024-12-23/11-02-21/best_val_loss_hf_model'
    ECG_CHECKPOINT_MODEL='experiments/2024-11-05/12-15-52/best_val_loss_hf_model'

    GPUS_PER_NODE=1

    # Train both encoders, but keep their first N layers frozen.
    TRAIN_ECG=true
    FREEZE_FIRST_N_LAYERS_ECG=2

    TRAIN_IMAGE=true
    FREEZE_FIRST_N_LAYERS_IMAGE=2

    MAX_EPOCHS=120
    WARMUP_EPOCHS=2

    BATCH_SIZE=128
    MIN_LR_FACTOR=0.1

    # Input sizes: frames sampled per MRI clip, time steps cropped per ECG.
    IMAGE_FRAMES=26
    ECG_TIME_STEPS=2500

    # Shared projection head dimensions for the contrastive space.
    PROJECTION_HIDDEN_DIM=256
    PROJECTION_DIM=128

    # Separate learning rates / weight decays for the head vs. each encoder.
    LR=3e-4
    IMAGE_LR=1e-4
    ECG_LR=1e-4

    WEIGHT_DECAY=1e-7
    IMAGE_WEIGHT_DECAY=1e-7
    ECG_WEIGHT_DECAY=1e-7

    # Loss and reduction settings
    # "both" combines the global and local contrastive terms (weighted below).
    LOSS_TYPE="both"

    LOSS_LOCAL_WEIGHT=1.0
    LOCAL_LOSS_TYPE="local_out"
    LOCAL_LOSS_STD=0.2

    TEMP_GLOBAL_LOSS=0.07
    TEMP_LOCAL_LOSS=0.15

    # Token reduction (pooling) choices per modality for global/local views.
    IMAGE_REDUCTION_GLOBAL="mean"
    ECG_REDUCTION_GLOBAL="mean"
    IMAGE_REDUCTION_LOCAL="mean"
    ECG_REDUCTION_LOCAL="mean"

    WITH_EVAL=true

    # Regression task
    # Downstream evaluation run every REGRESSION_INTERVAL epochs via linear
    # probing (linear layer reinitialized each epoch).
    REGRESSION_TASK="vol"
    REGRESSION_BATCH=256
    REGRESSION_INTERVAL=1
    REGRESSION_LR=3e-3
    REGRESSION_WEIGHT_DECAY=1e-6
    REINIT_LINEAR_LAYER_EVERY_EPOCH=true
    WARMUP_COSINE_USE=false
    USE_MLP=false
    TRAINING_SCHEME='linear_probing'

    AUGMENTATIONS_TYPE="heavy" # 'light', 'medium', 'heavy', 'without'

    ADDITIONAL_EXPERIMENT_NAME="BOTH_MEAN_CHANNEL"

    # Launch directly (no CMD string here, unlike the other branches); all
    # values above are single words, so the unquoted expansions are safe.
    srun python3 run.py \
        models=$MODELS_CONFIG \
        training_mode=$TRAINING_MODE_CONFIG \
        dataset=$DATASET_CONFIG \
        augmentations=$AUGMENTATIONS_TYPE \
        n_gpus=$GPUS_PER_NODE \
        dataset.num_workers=8 \
        dataset.batch_size=$BATCH_SIZE \
        max_epochs=$MAX_EPOCHS \
        augmentations.imaging.time_sample.result_n_frames=$IMAGE_FRAMES \
        augmentations.ecg.random_crop.ecg_time_steps=$ECG_TIME_STEPS \
        models.projection.hidden_dim=$PROJECTION_HIDDEN_DIM \
        models.projection.d_contrastive=$PROJECTION_DIM \
        models.params.lr=$LR \
        models.params.lr_image_encoder=$IMAGE_LR \
        models.params.lr_ecg_encoder=$ECG_LR \
        models.params.weight_decay=$WEIGHT_DECAY \
        models.params.weight_decay_image_encoder=$IMAGE_WEIGHT_DECAY \
        models.params.weight_decay_ecg_encoder=$ECG_WEIGHT_DECAY \
        models.params.scheduler.warmup_cosine.min_lr_factor=$MIN_LR_FACTOR \
        models.params.scheduler.warmup_cosine.warmup_steps=$WARMUP_EPOCHS \
        models.params.scheduler.warmup_cosine.total_steps=$MAX_EPOCHS \
        training_mode.loss.type=$LOSS_TYPE \
        training_mode.loss.weight_local=$LOSS_LOCAL_WEIGHT \
        training_mode.loss.global_loss.temperature=$TEMP_GLOBAL_LOSS \
        training_mode.loss.local_loss.temperature=$TEMP_LOCAL_LOSS \
        training_mode.loss.local_loss.type=$LOCAL_LOSS_TYPE \
        training_mode.loss.local_loss.std=$LOCAL_LOSS_STD \
        training_mode.reduction.image.global_token=$IMAGE_REDUCTION_GLOBAL \
        training_mode.reduction.ecg.global_token=$ECG_REDUCTION_GLOBAL \
        training_mode.reduction.image.all_tokens=$IMAGE_REDUCTION_LOCAL \
        training_mode.reduction.ecg.all_tokens=$ECG_REDUCTION_LOCAL \
        training_mode.encoders.image.train=$TRAIN_IMAGE \
        training_mode.encoders.image.freeze_first_n_layers=$FREEZE_FIRST_N_LAYERS_IMAGE \
        training_mode.encoders.image.checkpoint_path=$MRI_CHECKPOINT_MODEL \
        training_mode.encoders.ecg.train=$TRAIN_ECG \
        training_mode.encoders.ecg.freeze_first_n_layers=$FREEZE_FIRST_N_LAYERS_ECG \
        training_mode.encoders.ecg.checkpoint_path=$ECG_CHECKPOINT_MODEL \
        training_mode.with_eval=$WITH_EVAL \
        downstream_task.type=$REGRESSION_TASK \
        downstream_task.batch_size=$REGRESSION_BATCH \
        downstream_task.epoch_interval=$REGRESSION_INTERVAL \
        downstream_task.params.lr=$REGRESSION_LR \
        downstream_task.params.weight_decay=$REGRESSION_WEIGHT_DECAY \
        downstream_task.training_scheme=$TRAINING_SCHEME \
        downstream_task.use_mlp=$USE_MLP \
        downstream_task.params.scheduler.warmup_cosine.use=$WARMUP_COSINE_USE \
        downstream_task.augmentations.imaging.time_sample.result_n_frames=$IMAGE_FRAMES \
        downstream_task.reinitialize_every_epoch=$REINIT_LINEAR_LAYER_EVERY_EPOCH \
        experiment_name_addition=$ADDITIONAL_EXPERIMENT_NAME
fi
