#! /bin/bash
#PBS -N Test_GPU_Clust
#PBS -q class
#PBS -o output/test_mm.out
#PBS -e output/test_mm.err
#PBS -d /nethome/jchua3/vetter/cse6220/project/test/

# Run from the submission directory (PBS_O_WORKDIR is set by the batch
# system; this mirrors the -d directive above). Quote the expansion in
# case the path ever contains spaces, and bail out if the cd fails so
# we don't run the job from the wrong directory.
cd "$PBS_O_WORKDIR" || exit 1
echo "*****we are in "
echo "$PBS_O_WORKDIR"
echo "*****"

#run R on all the nodes we want
# NOTE: the heredoc delimiter below is deliberately UNQUOTED, so shell
# variables inside the R code (e.g. $np) are expanded before R sees it.
# $(...) replaces the legacy backtick command substitution.
"$(which mpirun)" -n 1 -machinefile "$PBS_NODEFILE" R --vanilla > output/test_mm_R.out <<EOF

#working GPU nodes: jinx1, jinx3, jinx15-22

# One-time installation steps, kept for reference (run manually, not
# as part of the batch job):
#install.packages("~/snow/snow.tar.gz")
#install.packages("~/gputools_src/gpu_tools.tar.gz", configure.args='--with-cuda-home=/opt/cuda-4.0/cuda')

# snow: cluster creation / clusterApply* over MPI.
# gputools: CUDA-backed linear algebra (gpuMatMult used below).
# MASS: mvrnorm() for generating the multivariate-normal test data.
library(snow)
library(gputools)
library(MASS) 

# Project-local helpers; presumably wraps gputools calls for the
# cluster workers — TODO confirm against gpuWrapper.R.
source("gpuWrapper.R")

# Session finalizer: if R exits while Rmpi is still initialized, close
# any remaining slaves and finalize MPI so the batch job terminates
# cleanly instead of hanging. A no-op when Rmpi was never loaded.
.Last <- function() {
    if (!is.loaded("mpi_initialize")) {
        return(invisible(NULL))
    }
    if (mpi.comm.size(1) > 0) {
        print("Please use mpi.close.Rslaves() to close slaves.")
        mpi.close.Rslaves()
    }
    print("Please use mpi.quit() to quit R")
    .Call("mpi_finalize")
}

# Pin the working directory (redundant with the shell-level cd, but
# harmless; keeps relative paths like output/ working inside R).
setwd("/nethome/jchua3/vetter/cse6220/project/test")

# NOTE(review): $np is substituted by the SHELL (unquoted heredoc), not
# by R. It is not defined anywhere in this script — presumably exported
# by the environment or `qsub -v np=...`; if unset, this line becomes
# makeCluster(, type="MPI") and the job fails with a syntax error.
# TODO: confirm how np is supplied, or give it a default in the shell.
cl <- makeCluster($np, type="MPI")

#see who's there
# Sanity check: run Sys.info() on every worker so the R log records
# which nodes actually joined the MPI cluster.
clusterCall(cl, function() Sys.info())

#prepare data: three 100x100 samples from a multivariate normal with
#zero mean and identity covariance. NOTE(review): "cov" and "data"
#shadow the base functions of the same name; the names are kept so the
#workspace saved below is unchanged.
mu <- numeric(100)   # zero mean vector (same as mat.or.vec(100, 1))
cov <- diag(100)     # identity covariance (replaces zero matrix + diag(cov) <- 1)
A <- mvrnorm(100, mu, cov)
B <- mvrnorm(100, mu, cov)
C <- mvrnorm(100, mu, cov)
data <- list(A, B)

#run GPU matmult and save results
# Load-balanced dispatch: each element of data (A, then B) is sent to
# an available worker, which computes gpuMatMult(element, C) on its GPU.
#results <- clusterCall(cl, gpuSvd, A, B)
results <- clusterApplyLB(cl, data, gpuMatMult, C)   
# Persist the entire workspace (inputs and results) BEFORE tearing the
# cluster down, so a shutdown failure cannot lose the computation.
save(list=ls(all=TRUE), file="output/test_mm_results.Rdata")

# Orderly teardown: release the snow/MPI workers, then exit R via
# Rmpi's mpi.quit() so MPI is finalized (see .Last above as a backstop).
stopCluster(cl)
mpi.quit()


EOF



