#!/bin/bash

# Readme
# The Message Passing Interface (MPI) is a message-passing standard used in parallel programming, 
# typically for multi-node, distributed processor and distributed memory use cases.

# Hybrid MPI/OpenMP programs run multi-threaded tasks: each MPI rank
# spawns multiple OpenMP threads within its allocated CPUs.

# If using OpenMP for threading, 
# the environment variable OMP_NUM_THREADS should be set, 
# which specifies the number of threads to parallelize over. 

# Current case — the job requests:
#   2 nodes;
#   on each node, two CPUs (processors), one per socket (a 2-way server);
#   each MPI rank runs on one CPU (one rank per socket);
#   each rank runs 32 OpenMP threads (one per core of its CPU).



# --- Slurm resource request -------------------------------------------------
# NOTE: #SBATCH directives are parsed by sbatch, not by bash; they must appear
# before the first executable (non-comment) line of the script.
#SBATCH --account=acc_XXX
#SBATCH --partition=workq
#SBATCH --nodes=2 # how many nodes to use
#SBATCH --ntasks-per-node=2  # the number of tasks (MPI ranks) to run per node.
#SBATCH --cpus-per-task=32 # the number of CPUs (threads) to use per task.
#SBATCH --sockets-per-node=2 # 2-way server
#SBATCH --ntasks-per-socket=1 
#SBATCH --cores-per-socket=32
# Total: 2 nodes x 2 ranks/node = 4 MPI ranks, each with 32 CPUs for threads.
#SBATCH --mem=0  # requests all available memory per node.
#SBATCH --time=1:00:00


# Lift per-process resource limits that commonly constrain large HPC runs:
#   -d data segment, -s stack, -t CPU time, -v virtual memory.
# (Raising a soft limit above the hard limit fails with a warning but does
# not stop the script, since 'set -e' is not in effect.)
for limit_flag in d s t v; do
  ulimit -"$limit_flag" unlimited
done

# Ask Slurm to export the full submission environment to the job steps.
export SLURM_EXPORT_ENV=ALL

# Slurm variable SLURM_CPUS_PER_TASK is set to the value
# of --cpus-per-task, but only if --cpus-per-task was explicitly set.

# Set OMP_NUM_THREADS to the same value as --cpus-per-task,
# with a fallback of 1 thread in case SLURM_CPUS_PER_TASK isn't set.
# Quoted to be robust even if the variable were ever non-numeric.
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"

# NOTE(review): since Slurm 22.05, srun no longer inherits --cpus-per-task
# from sbatch automatically; if threads end up pinned to one core, consider
# also exporting SRUN_CPUS_PER_TASK="$SLURM_CPUS_PER_TASK" — confirm against
# the cluster's Slurm version.


# Load your app here
# ... run your MPI/OpenMP program ...
#
# Launch with exactly ONE launcher. The original script ran both mpirun and
# srun, which would execute the application twice and let the second run
# clobber the first run's "output"/"error" files.

# OpenMPI
mpirun -np "${SLURM_NTASKS}" Your-App-Here 1>output 2>error

# Intel MPI or MPICH, or OpenMPI — alternative launcher: uncomment the srun
# line below AND comment out the mpirun line above to use it instead.
# srun --mpi=pmi2 Your-App-Here 1>output 2>error