#!/bin/bash
#
# SLURM batch script: launch WGAN training on one GPU of the junogpu
# partition. Submit with `sbatch <this file>`.

######## Part 1: SLURM resource requests #########
# NOTE: #SBATCH directives are only parsed before the first executable
# command, so this block must stay at the top of the script.
#SBATCH --partition=gpu
#SBATCH --qos=normal
#SBATCH --account=junogpu
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#SBATCH --mem-per-cpu=4G
#SBATCH --gpus=1
#SBATCH --job-name=WGAN

######## Part 2: job payload ######
# Fail fast on errors, unset variables, and pipeline failures.
# (Placed after the #SBATCH block so the directives above are still read.)
set -euo pipefail

# Abort if the working directory is unavailable; otherwise python would be
# launched from whatever cwd SLURM happened to give us.
cd /hpcfs/juno/junogpu/lizhihao/research/GAN || exit 1
# conda activate pt-cu121-py310

# Tag this run with the SLURM job id so each submission gets a unique
# version/log directory. ${VAR:?msg} aborts with a clear message if the
# script is run outside SLURM. stdout is discarded; stderr (tracebacks,
# progress bars) still reaches the SLURM output file.
python WGAN.py --lr 0.0005 --clip 1.0 --batch_size 64 --version "${SLURM_JOB_ID:?not running under SLURM}" --penalty clip --gp_lambda 10.0 > /dev/null

# Alternative invocations kept for reference:
# python WGAN.py --lr 0.0005 --clip 1.0 --batch_size 64 --version optiplex --penalty gp --gp_lambda 10.0 --metric fid
# python evaluate.py -i /hpcfs/juno/junogpu/lizhihao/research/CaloChallenge/data/eval-1-photon.hdf5 -r /hpcfs/juno/junogpu/lizhihao/research/CaloChallenge/data/dataset_1_photons_2.hdf5 -m all -d 1-photons --output_dir /hpcfs/juno/junogpu/lizhihao/research/CaloChallenge/code/evaluation_results/
# tensorboard --logdir lightning_logs_WGAN --port=25565