#!/bin/bash
#SBATCH --partition=batch
#SBATCH --nodes=1             # Max is 1
#SBATCH --ntasks=5           # Max is 40 (2x 20-core Intel Broadwell CPUs)
#SBATCH --cpus-per-task=1    # Max is 2 (hyper-threading is on)
#SBATCH --gres=gpu:1          # Max is 8 (full DGX-1)
#SBATCH --time=1:00:00        # Max is 4 hours
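
# A small sketch (assuming standard Slurm environment variables) that logs
# what the scheduler actually granted; handy when requested and allocated
# resources differ:
echo "Job ${SLURM_JOB_ID:-?} on $(hostname): ${SLURM_NTASKS:-?} tasks, ${SLURM_CPUS_PER_TASK:-?} CPUs/task, GPUs=${CUDA_VISIBLE_DEVICES:-none}"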

module purge > /dev/null 2>&1
module load Core/gcc/8.4.0 gcc/8.4.0/openmpi/3.1.6 gcc/8.4.0/openblas/0.3.10-openmp gcc/8.4.0/eigen/3.3.7
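
# Optional sanity check (a sketch, not required): record which compiler and
# MPI launcher the job actually picked up, since module stacks can change
# underneath a script.
gcc --version | head -n 1
mpirun --version | head -n 1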

export MYEXE=./main.t        # "./" so the launcher finds the binary without a PATH lookup
export OMP_PROC_BIND=true
#export OMP_NUM_THREADS=40
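
# A common pattern (a sketch, not in the original script): derive the OpenMP
# thread count from the Slurm allocation instead of hard-coding it as in the
# commented line above; falls back to 1 if the variable is unset.
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}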

mpirun -n 1 ${MYEXE} > result_2.txt   # single rank; note --ntasks=5 above reserves 5 MPI slots
#mpirun -np 8  --map-by ppr:8:node:PE=5 --mca btl ^openib --report-bindings ${MYEXE} 2>&1 | tee out.${SLURM_JOBID}
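
# Alternative launch (a sketch, assuming this Open MPI build has Slurm
# support): let srun place and bind the ranks directly. result_srun.txt is
# a hypothetical output name.
#srun --ntasks=${SLURM_NTASKS} --cpu-bind=cores ${MYEXE} > result_srun.txt 2>&1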

# Legacy Intel MPI workflow, kept for reference:
#srun hostname -s | sort -u > slurm.hosts
#module load intel/2017.1
#mpiicc -o hellompi hellompi.c
#mpiexec -n 2 -machinefile slurm.hosts ./hellompi > result.txt
#rm -f slurm.hosts
