#!/bin/bash

# Slurm batch script: run VASP 6.3.2 with node-local scratch staging.
#SBATCH --time=720:00:00   # walltime
#SBATCH --ntasks-per-node=8 # number of CPU cores, max 64
#SBATCH --nodes=1   # number of nodes, do not change
#SBATCH --mem=120G   # memory per node, max 500G
#SBATCH -J "vasp632"   # job name
#SBATCH --partition=vasp # do not change
#SBATCH --qos=vasp # do not change

# Lift resource limits that could otherwise abort a long MPI run.
ulimit -d unlimited
ulimit -s unlimited
ulimit -t unlimited
ulimit -v unlimited

# Load the Intel oneAPI toolchain (compilers + MPI runtime).
source /opt/intel/oneapi/setvars.sh

### Added by WANG Jian, abbott@jlu.edu.cn
# Restrict UCX to unreliable-datagram, shared-memory, and loopback
# transports — presumably a workaround for this cluster's fabric; confirm
# with the site admin before changing.
export UCX_TLS=ud,sm,self


# Select the VASP flavor: leave exactly one of the three assignments
# uncommented, then EXE is exported for the mpirun step below.

# standard k-point version (default)
EXE=/opt/hpc4you/apps/vasp/vasp632/bin/vasp_std

# non-collinear / spin-orbit version
#EXE=/opt/hpc4you/apps/vasp/vasp632/bin/vasp_ncl

# Gamma-point-only version
#EXE=/opt/hpc4you/apps/vasp/vasp632/bin/vasp_gam

export EXE


# Run from the directory the job was submitted in.
cd "$SLURM_SUBMIT_DIR" || exit 1
username=$(whoami)

### here, suppose /tmp is the I/O storage.

# Per-job scratch directory on node-local disk; the job id makes it
# unique so concurrent jobs of the same user cannot collide.
VASP_RUNDIR=/tmp/${username}.${SLURM_JOB_ID}

# Use -e (existence) rather than the deprecated, ambiguous -a primary,
# and report creation only after mkdir actually succeeds.
if [ ! -e "$VASP_RUNDIR" ]; then
   mkdir -p "$VASP_RUNDIR" || exit 1
   echo "Scratch directory $VASP_RUNDIR created."
fi

# Stage input files (INCAR, POSCAR, KPOINTS, POTCAR, ...) to scratch.
cp -r "$SLURM_SUBMIT_DIR"/* "$VASP_RUNDIR"

cd "$VASP_RUNDIR" || exit 1


echo "Starting VASP run at $(hostname) on:" "$(date)" >> "${SLURM_JOB_ID}.log"



mpirun -np "$SLURM_NTASKS" "${EXE}" 1>output 2>error

echo "Finished VASP run at $(hostname) on:" "$(date)" >> "${SLURM_JOB_ID}.log"
# SLURM_JOB_NODELIST holds the node list itself (e.g. "node[01-04]"),
# not a file name — print its value; cat'ing it would always fail.
echo "$SLURM_JOB_NODELIST" >> "${SLURM_JOB_ID}.log"

# Stage results back, then remove scratch. ${VASP_RUNDIR:?} aborts the
# rm if the variable were ever empty, guarding against 'rm -fr /*'.
cp -fr "$VASP_RUNDIR"/* "$SLURM_SUBMIT_DIR"
rm -fr "${VASP_RUNDIR:?}"

