#! /bin/bash
#PBS -N DistMatMult
#PBS -l walltime=02:00:00
#PBS -q class
#PBS -o output/DistMatMult.out
#PBS -e output/DistMatMult.err
#PBS -d /nethome/jchua3/vetter/cse6220/project/
#PBS -v np=4
#PBS -l nodes=4:sixcore

# Move into the directory the job was submitted from. Quote the expansion
# (paths may contain spaces) and abort if the cd fails — otherwise the job
# would silently run from $HOME and the relative output/ paths below would
# land in the wrong place.
cd "$PBS_O_WORKDIR" || { echo "ERROR: cannot cd to '$PBS_O_WORKDIR'" >&2; exit 1; }
echo "*****we are in "
echo "$PBS_O_WORKDIR"
echo "*****"

# Run R under MPI: start a single master R process (-n 1) on the allocated
# nodes; the R script itself spawns the worker cluster via startCluster().
# Plain `mpirun` does the same PATH lookup the old backtick-`which` form did.
# NOTE: the here-doc delimiter is deliberately UNQUOTED so the shell expands
# the np variable (set by the "#PBS -v np=4" directive) before R sees it.
mpirun -n 1 -machinefile "$PBS_NODEFILE" R --vanilla > output/DistMatMult.R.out <<EOF

# RUN R SCRIPT FOR TESTING GPU MAT MULT KERNEL

library(gputools)   # provides gpuMatMult
library(MASS)       # provides mvrnorm

# set working directory of main source path file and source it
# (source.R defines startCluster, distTime, taskPullNormal, gpuFuncWrap,
#  cpuMatMult and nodeMap used below)
setwd("/nethome/jchua3/vetter/cse6220/project/")
source("source.R")

# start up the MPI cluster with np instances of R, running on all
# available nodes; np here is substituted by the submitting shell
cl <- startCluster($np)

# prepare our experiments: matrix sizes to benchmark
#size <- c(500,1000,1500,2000,2500,3000,3500)
#size <- c(500,1000,1500,2000,2500,3000)
size <- c(1500, 2500, 3500)
results <- list()

for (i in 1:length(size)) {
 
    # construct data: draw size[i] x size[i] multivariate-normal matrices
    # with zero mean and identity covariance
    mu <- mat.or.vec(size[i],1)
    cov <- mat.or.vec(size[i],size[i])
    diag(cov) <- 1
    A <- mvrnorm(size[i],mu,cov)
    B <- mvrnorm(size[i],mu,cov)
    C <- mvrnorm(size[i],mu,cov)
    
    # this actually does not do a deep copy, it just points
    # to the variable in memory so it's not as bad as it looks:
    # 50 alternating references to A and B form the task list
    data <- list(A,B,A,B,A,B,A,B,A,B,A,B,A,B,A,B,A,B,A,B,
                 A,B,A,B,A,B,A,B,A,B,A,B,A,B,A,B,A,B,A,B,
                 A,B,A,B,A,B,A,B,A,B);

    # run GPU matmult across the cluster and save the timing results
    #results <- clusterCall(cl, gpuSvd, A, B)
    results[[i]] <- distTime(taskPullNormal(cl,data,gpuFuncWrap,gpuMatMult,cpuMatMult,nodeMap,C))

}

# save our results
save(results, file="output/DistMatMult.Rdata")
# NOTE(review): stock snow requires stopCluster(cl); assuming source.R
# defines a no-argument wrapper here — confirm, else pass cl explicitly.
stopCluster()
mpi.quit()


EOF



