#!/bin/bash

# Readme
# A multi-threaded (or multi-core or multi-process) job 
# uses multiple CPUs (cores/threads) with shared memory on 1 compute node. 
# This is a common use case as it enables basic parallel computing.
# Please note that you may have to modify your scripts and programs 
# to explicitly use multiple CPUs (cores/threads), 
# depending on the application or programming language you are using.

# Some compiled programming languages like C, C++, and Fortran use OpenMP for multi-threading. 
# In these cases, you should compile your programs with an OpenMP flag (e.g. -fopenmp for GCC/Clang, -qopenmp for Intel) and 
# explicitly set the environment variable OMP_NUM_THREADS (number of threads to parallelize over) 
# in your job scripts. 
# The OMP_NUM_THREADS count should equal the requested --cpus-per-task option in the job script.

# ---- Slurm resource requests --------------------------------------------
# NOTE: sbatch reads #SBATCH directives only until the first executable
# command in the script; keep them together near the top.
# Account to charge -- replace acc_XXX with your project's account name.
#SBATCH --account=acc_XXX
# Partition (queue) to submit to -- site-specific name.
#SBATCH --partition=workq
# One node only: all threads share memory on a single machine.
#SBATCH --nodes=1
# A single task (process) on that node ...
#SBATCH --ntasks-per-node=1
# ... with 16 CPUs (cores/threads) for it; should equal OMP_NUM_THREADS.
#SBATCH --cpus-per-task=16
# Total memory for the job on the node.
#SBATCH --mem=32G
# Wall-clock time limit (H:MM:SS).
#SBATCH --time=1:00:00


# Lift the per-process resource caps (data segment, stack, CPU time,
# virtual memory) so the job is bounded only by the Slurm allocation.
for limit_flag in d s t v; do
  ulimit -"$limit_flag" unlimited
done

# Propagate the full submission environment to launched job steps.
export SLURM_EXPORT_ENV=ALL

# Slurm defines SLURM_CPUS_PER_TASK only when --cpus-per-task was given
# explicitly, so fall back to a single thread when it is absent.
# OMP_NUM_THREADS should equal the requested --cpus-per-task so OpenMP
# codes use exactly the CPUs that were allocated.
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"


# Load your app here (e.g. 'module load ...')

# Launch your program with exactly ONE of the commands below.
# BUG FIX: the original left both launch lines active, which would run
# the application twice, back to back. srun is kept as the active
# launcher; the mpirun alternative is commented out.

# Intel MPI, MPICH, or OpenMPI via srun.
# Since Slurm 22.05, srun no longer inherits --cpus-per-task from the
# job allocation, so pass it explicitly to keep all threads available
# to the step. stdout/stderr are written to ./output and ./error.
srun --mpi=pmi2 --cpus-per-task="${SLURM_CPUS_PER_TASK:-1}" Your-App-Here 1>output 2>error

# OpenMPI alternative (uncomment to use INSTEAD of the srun line above):
# mpirun -np "$SLURM_NTASKS" Your-App-Here 1>output 2>error