#!/bin/bash -l
# PBS batch script: launch WarpX on ALCF Aurora (1 MPI rank per PVC tile).
# Placeholders <proj> and <NODES> must be filled in before submission.

#PBS -A <proj>
#PBS -l select=<NODES>:system=aurora
#PBS -W run_count=17
#PBS -l walltime=0:10:00
#PBS -l filesystems=home:flare
#PBS -q debug
#PBS -N test_warpx

# Set required environment variables
# support gpu-aware-mpi
# export MPIR_CVAR_ENABLE_GPU=1

# Change to the directory the job was submitted from.
# Abort if it is unreachable — otherwise mpiexec would run from $HOME.
echo "Working directory is ${PBS_O_WORKDIR}"
cd "${PBS_O_WORKDIR}" || { echo "ERROR: cannot cd to ${PBS_O_WORKDIR}" >&2; exit 1; }

echo "Jobid: ${PBS_JOBID}"
echo "Running on host $(hostname)"
echo "Running on nodes $(cat "${PBS_NODEFILE}")"

# On Aurora, must load module environment in job script:
source "${HOME}/aurora_warpx.profile"

# executable & inputs file or python interpreter & PICMI script here
EXE=./warpx
INPUTS=input1d

# MPI and OpenMP settings
NNODES=$(wc -l < "${PBS_NODEFILE}")
NRANKS_PER_NODE=12 # 1 rank per PVC tile (a.k.a. stack; like a gcd on an AMD GPU)
NTHREADS=1
# Avoid core 0 on socket 0 and core 52 on socket 1, where OS threads run:
CPU_RANK_BIND="--cpu-bind=list:1-8:9-16:17-24:25-32:33-40:41-48:53-60:61-68:69-76:77-84:85-92:93-100"
# Convention: gpuNumber.tileNumber
GPU_RANK_BIND="--gpu-bind=list:0.0:0.1:1.0:1.1:2.0:2.1:3.0:3.1:4.0:4.1:5.0:5.1"

NTOTRANKS=$(( NNODES * NRANKS_PER_NODE ))
echo "NUM_OF_NODES= ${NNODES} TOTAL_NUM_RANKS= ${NTOTRANKS} RANKS_PER_NODE= ${NRANKS_PER_NODE} THREADS_PER_RANK= ${NTHREADS}"

# NOTE: the two *_RANK_BIND values are single words (no whitespace), so they
# are safe to expand unquoted as one mpiexec option each.
mpiexec --np "${NTOTRANKS}" -ppn "${NRANKS_PER_NODE}" ${CPU_RANK_BIND} ${GPU_RANK_BIND} -envall "${EXE}" "${INPUTS}" > output.txt