#!/bin/sh

# hostfile
# node0 
# node1 slots=2 
# node2 slots=4 max_slots=4
# node3 slots=4 max_slots=20
#
# mpirun -np 3 --host a,b,c
# mpirun -host node1,node1,node2,node2 ...

#make dbg=0 sm=20 -C ../src clean all && \
#make dbg=0 sm=20 clean all && \

#export MPI_INSTALL_PATH=/usr/lib64/openmpi
# MPI transport selection: ob1 PML over TCP/self BTL (portable default).
MPI_NET="--mca pml ob1 -mca btl tcp,self"
#MPI_NET="--mca pml ob1 -mca btl openib,self"
DBG=0             # debug build switch, forwarded to make as dbg=$DBG
ARCH=20           # CUDA sm architecture, forwarded to make as arch=$ARCH
#ARCH=13
DEV=0             # CUDA device index, passed to test_mpi via -d
# $HOSTNAME is a bash/ksh convenience variable and is often unset under
# /bin/sh (dash); fall back to hostname(1) so -host always gets a value.
MPI_HOST="-host ${HOSTNAME:-$(hostname)}"
#MPI_HOST="-host agape5,agape6"
NCALLS=10000      # number of calls, passed to test_mpi via -c
NREPS=1           # NOTE(review): currently unused below — kept for reference
NTHREADS=128      # passed to test_mpi via -t — presumably threads per block; confirm in test_mpi

# test_mpi mode flags:
#   (no flag)  CUOS cuMPI sync test
#   -a         CUOS cuMPI async test
#   -o         CUOS=off, use plain CUDA
# Build the library and the local test binary, then launch two ranks under
# mpirun.  `set -x` turns on command tracing just before the run so the exact
# mpirun invocation appears in the log.  Everything (stdout + stderr) is
# duplicated to /tmp/bu.log via tee while still echoing to the terminal.
# NOTE: $MPI_HOST/$MPI_NET are intentionally unquoted — they hold multiple
# whitespace-separated arguments and must word-split.
{
    make dbg=$DBG arch=$ARCH -C ../src all &&
        make dbg=$DBG arch=$ARCH all &&
        set -x &&
        mpirun -np 2 $MPI_HOST $MPI_NET test_mpi \
            -c $NCALLS -d $DEV -t $NTHREADS
} 2>&1 | tee /tmp/bu.log

#mpirun -np 2 -host agape6,agape5 --mca btl openib,self test_mpi -c 10000 2>&1 | tee /tmp/bu.log
#mpirun -np 2 -host agape5,agape6 --mca btl openib,self test_mpi -c 10 2>&1 | tee /tmp/bu.log
#mpirun -np 2 -host $HOSTNAME --mca btl openib,self test_mpi 2>&1 | tee /tmp/bu.log
#mpirun -np 2 -host agape2 -mca plm rsh -mca plm_rsh_agent ssh --mca pml ob1 -mca btl tcp,self test_mpi
#./test_mpi
#mpirun -np 2 -host apeiron2,apeiron2 test_mpi
#apeiron1 1 C2050
#apeiron2 1 C2050
#mpirun -np 2 -host apeiron1,apeiron2 test_mpi
#ompi_info --all
