#!/bin/bash
# PBS batch job: 1-D FFT benchmark (FFTW backend) via Intel MPI.
# Required env (provided by PBS at runtime): SCR (scratch dir), PBS_NODEFILE.
#PBS -l walltime=00:30:00
##PBS -l nodes=1:ppn=1:himem
#PBS -l nodes=1:ppn=1
#PBS -V
#PBS -q normal
#PBS -N fft1d_fftw.impi.p1x32

# Change to the job scratch directory; abort rather than run in the wrong place
# if $SCR is unset or the cd fails.
cd "${SCR:?SCR not set}" || exit 1

## run from the submit directory instead:
#cd "$PBS_O_WORKDIR"

# Switch MPI stack from MVAPICH2 to Intel MPI and load the matching compiler.
module swap mvapich2-1.7rc1-intel-12.0.4 intel/mpi
module load intel/12.0.4

# One MPI rank per line in the PBS node file.
export NP=$(wc -l < "$PBS_NODEFILE")

##module load mvapich2-1.7rc1-intel-12.0.4
##export MV2_SRQ_SIZE=4000
export MV2_ENABLE_AFFINITY=0

export OMP_NUM_THREADS=12

# Stage the benchmark binary into scratch; abort if the copy fails so we do
# not launch a stale or missing ./fft_alloc_bench.
cp /ui/ncsa/jnkim/build/hpc-intel-mpi/bin/fft_alloc_bench . || exit 1

# Alternative launch lines kept for reference:
#mpirun_rsh -ssh -np "${NP}" -hostfile "${PBS_NODEFILE}" ./fft_alloc_bench --enable-fftw -d 12288 -n 288 -b 144 -i 10
#mpirun_rsh -ssh -np "${NP}" -hostfile "${PBS_NODEFILE}" ./fft_alloc_bench --enable-mkl -d 12288 -n 288 -b 144 -i 10
#mpirun --rsh=ssh -np "${NP}" ./fft_alloc_bench --enable-mkl -d 12288 -n 288 -b 144 -i 10
mpirun --rsh=ssh -np "${NP}" ./fft_alloc_bench --enable-fftw -d 12288 -n 288 -b 144 -i 10

# OpenMPI variant kept for reference:
#cp /ui/ncsa/jnkim/svnwork/hpccodelets/openmpi/bin/fft_alloc_bench .
#module unload mvapich2-1.7rc1-intel-12.0.4
#module load openmpi-1.4.3-intel-12.0.4
#export OMPI_MCA_btl_openib_flags=1
#export OMPI_MCA_btl_mpi_leave_pinned=0
##/usr/local/mpi/openmpi/openmpi-1.4.3-intel-12.0.4/bin/mpirun -np "${NP}" -hostfile "${PBS_NODEFILE}" ./fft_alloc_bench --enable-fftw -d 12288 -n 288 -b 144 -i 10
#/usr/local/mpi/openmpi/openmpi-1.4.3-intel-12.0.4/bin/mpirun -np "${NP}" -hostfile "${PBS_NODEFILE}" ./fft_alloc_bench --enable-mkl -d 12288 -n 288 -b 144 -i 10