#this code is entirely dependent on the following:
    #Must be running Linux (possibly even Ubuntu)
    #Must have s3cmd installed and configured with a credentials file
    #Must have Amazon Elastic Map Reduce command line tools installed and configured

    # Ruby EMR Command line tool: http://developer.amazonwebservices.com/connect/entry.jspa?externalID=2264&categoryID=266
    #s3cmd: command line S3 client: http://s3tools.org/s3cmd


## TODO: add a function that installs packages (CRAN and non-CRAN) on an already-running cluster



listToCsv <- function(inList, outFileName){
  # Serialize each element of a list to one line of a CSV-style file:
  #   "<index>,<base64(serialize(element))>"
  # Inverse of csvToList().
  #
  # Args:
  #   inList:      a list; each element must be serializable by serialize().
  #   outFileName: path of the output file (overwritten if it exists).
  #
  # Fail loudly up front if the base64 encoder is unavailable
  # (require() alone only warns and returns FALSE).
  if (!requireNamespace("caTools", quietly = TRUE)) {
    stop("listToCsv: the 'caTools' package is required for base64encode().")
  }
  if (!is.list(inList)) {
    stop("listToCsv: The input list fails the is.list() check.")
  }
  # Truncate/create the output file before appending lines.
  cat("", file = outFileName, append = FALSE)

  for (i in seq_along(inList)) {
    # ascii = TRUE keeps the serialized payload text-safe before encoding.
    encoded <- caTools::base64encode(serialize(inList[[i]], NULL, ascii = TRUE))
    cat(i, ",", encoded, "\n", file = outFileName, sep = "", append = TRUE)
  }
}

csvToList <- function(inFileName){
  # Inverse of listToCsv(): read lines of the form
  # "<index>,<base64 payload>" and unserialize each payload back into a
  # list element.
  #
  # Args:
  #   inFileName: path of a file written by listToCsv().
  # Returns: a list with one element per input line.
  if (!requireNamespace("caTools", quietly = TRUE)) {
    stop("csvToList: the 'caTools' package is required for base64decode().")
  }
  # BUG FIX: the original called readLines(fileName, ...), but 'fileName'
  # does not exist in this function's scope -- the argument is 'inFileName'.
  linesIn <- readLines(inFileName, n = -1)
  # Preallocate instead of growing the list inside the loop.
  outList <- vector("list", length(linesIn))

  for (i in seq_along(linesIn)) {
    # Everything after the first comma is the base64-encoded payload.
    payload <- strsplit(linesIn[[i]], split = ",")[[1]][[2]]
    outList[[i]] <- unserialize(caTools::base64decode(payload, "raw"))
  }
  return(outList)
}


getFinalStatus <- function(jobFlowId){
  # Poll the EMR job flow every 30s until it reaches a settled state and
  # return that state.
  #
  # checkStatus() shells out to the elastic-mapreduce CLI, so the status
  # is fetched ONCE per iteration and cached (the original called it up
  # to three times per loop, which was slow and could race between the
  # loop test, the message, and the final return).
  finalStates <- c("COMPLETED", "FAILED", "TERMINATED", "WAITING")
  repeat {
    state <- checkStatus(jobFlowId)$ExecutionStatusDetail$State
    if (state %in% finalStates) {
      return(state)
    }
    message(paste(state, " - ", Sys.time(), sep = ""))
    Sys.sleep(30)
  }
}

checkStatus <- function(jobFlowId){
  # Shell out to the elastic-mapreduce CLI, parse its JSON description of
  # the given job flow, and return the first job-flow object (a list with
  # fields such as $ExecutionStatusDetail$State).  The CLI path is hard
  # coded to ~/EMR/.
  #
  # this works best if this change mentioned in this article is made
  # http://developer.amazonwebservices.com/connect/thread.jspa?threadID=46583&tstart=60
  # Otherwise I had issues with the request timing out
  
  require(rjson)
  # intern=T captures the CLI's stdout as a character vector, one element
  # per line of output.
  emrJson <- paste(system(paste("~/EMR/elastic-mapreduce --describe --jobflow ",
                                jobFlowId, sep=""), intern=T))
  # NOTE(review): the pattern "\\\\" is regex for ONE literal backslash and
  # the replacement "\\" is also one literal backslash, so this looks like
  # a no-op; presumably the intent was to collapse doubled backslashes --
  # confirm against actual CLI output before changing.
  emrJson <- gsub("\\\\", "\\", emrJson) #handle the double escaped text
  parser <- newJSONParser()
    
  # Feed the JSON to rjson's streaming parser line by line.
  for (i in 1:length(emrJson)){
      parser$addData(emrJson[i])
  }
    
  return(parser$getObject()[[1]][[1]])
}

# july 12, 2010 - changed so that all clusters are created and
# left running and then manually shut down

createCluster <- function(numInstances=2, bootStrapLatestR=T,
                          cranPackages=NULL, enableDebugging=F,
                          bootstrapScript="/home/jal/Documents/R/EMRExample/emrPackage/bootstrap.sh"){
  # Create the S3 scratch buckets, optionally upload the R bootstrap
  # script, and launch an EMR cluster that stays alive until
  # terminateCluster() is called.
  #
  # Args:
  #   numInstances:     number of EC2 instances in the cluster.
  #   bootStrapLatestR: if TRUE, upload bootstrapScript and run it at boot.
  #   cranPackages:     character vector of CRAN packages for the workers.
  #   enableDebugging:  stored on the cluster object; used by submitJob().
  #   bootstrapScript:  local path of the bootstrap shell script.  This
  #                     was previously hard coded; the old path remains
  #                     the default for backward compatibility.
  # Returns: a clusterObject list consumed by the other functions here.
  #
  #TODO: add support for different instance sizes

  clusterObject <- list(numInstances=numInstances,
                        cranPackages=cranPackages,
                        enableDebugging=enableDebugging,
                        bootStrapLatestR=bootStrapLatestR)

  localTempDir <- tempdir()
  clusterObject$localTempDir <- localTempDir
  clusterObject$localTempDirOut <- paste(localTempDir, "/out", sep="")

  # Use the last path component of the temp dir as the bucket name;
  # S3 bucket names must be lower case.
  pathParts <- unlist(strsplit(localTempDir, "/"))
  s3TempDir <- tolower(pathParts[length(pathParts)])
  deleteS3Bucket(s3TempDir)     # start from a clean bucket
  clusterObject$s3TempDir <- s3TempDir

  s3TempDirOut <- tolower(paste(s3TempDir, "out", sep=""))
  deleteS3Bucket(s3TempDirOut)
  clusterObject$s3TempDirOut <- s3TempDirOut

  #create the s3 bucket
  system(paste("s3cmd mb s3://", s3TempDir, sep=""))

  #upload the bootstrapper to S3 if needed
  if (isTRUE(bootStrapLatestR)) {
    system(paste("s3cmd put ", bootstrapScript, " s3://",
                 s3TempDir, "/bootstrap.sh", sep=""))
  }
  clusterObject$bootStrapLatestR <- bootStrapLatestR

  # start cluster (startCluster returns NA if the launch failed)
  jobFlowId <- startCluster(clusterObject)
  clusterObject$jobFlowId <- jobFlowId

  return(clusterObject)
}

startCluster <- function(clusterObject){
  # Launch the EMR cluster described by clusterObject and block until it
  # reaches a settled state.
  # Returns the job flow id on success, NA if the cluster failed to
  # launch or ended in a failure state.  (The original fell off the end
  # and returned NULL on FAILED/TERMINATED/CANCELLED despite its comment
  # promising NA; it also assigned numInstances twice and had an
  # unreachable stop() after return(NA).)
  numInstances     <- clusterObject$numInstances
  s3TempDir        <- clusterObject$s3TempDir
  s3TempDirOut     <- clusterObject$s3TempDirOut
  bootStrapLatestR <- clusterObject$bootStrapLatestR

  # --alive keeps the cluster running (WAITING) between job steps.
  emrCall <- paste("~/EMR/elastic-mapreduce --create --stream --name emrFromR ",
                    "--alive ",
                    "--num-instances ", numInstances, " ",
                    if (isTRUE(bootStrapLatestR)) {
                      paste("--bootstrap-action  s3://",
                            s3TempDir, "/bootstrap.sh ", sep="")
                    },
                    sep="")

  emrCallReturn <- system(emrCall, intern=TRUE)
  message(emrCallReturn)
  # On success the CLI prints "Created job flow <id>".
  if (substr(emrCallReturn, 1, 16) != "Created job flow"){
    message(paste("The cluster did not launch properly. The command line was ", emrCall, sep=""))
    return(NA)
  }

  jobFlowId <- substr(emrCallReturn, 18, nchar(emrCallReturn))

  # Poll until the cluster settles; fetch the status ONCE per iteration
  # instead of shelling out several times per loop test.
  settledStates <- c("COMPLETED", "FAILED", "TERMINATED", "WAITING", "CANCELLED")
  repeat {
    state <- checkStatus(jobFlowId)$ExecutionStatusDetail$State
    if (state %in% settledStates) break
    message(paste(state, " - ", Sys.time(), sep="" ))
    Sys.sleep(45)
  }

  if (state == "WAITING") {
    message("Your Amazon EMR Hadoop Cluster is ready for action. \nRemember to terminate your cluster with terminateCluster().\nAmazon is billing you!")
  }

  if (state %in% c("COMPLETED", "WAITING")) {
    return(jobFlowId)
  }
  # FAILED / TERMINATED / CANCELLED
  return(NA)
}

terminateCluster <- function(clusterObject, deleteTemp=T){
  # Shut down the EMR job flow and, unless deleteTemp is FALSE, remove
  # the S3 scratch buckets and local temp directories for this cluster.
  terminateCmd <- paste("~/EMR/elastic-mapreduce --terminate --jobflow ",
                        clusterObject$jobFlowId, sep="")
  system(terminateCmd, intern=T)

  if (deleteTemp == T) {
    for (bucket in c(clusterObject$s3TempDir, clusterObject$s3TempDirOut)) {
      deleteS3Bucket(bucket)
    }
    for (localDir in c(clusterObject$localTempDir, clusterObject$localTempDirOut)) {
      unlink(localDir, recursive = T)
    }
  }
}

deleteS3Bucket <- function(bucketName){
  # Empty the bucket, then remove the bucket itself, via the s3cmd CLI.
  bucketUrl <- paste("s3://", bucketName, sep="")
  system(paste("s3cmd del --force ", bucketUrl, "/*", sep=""))
  system(paste("s3cmd rb ", bucketUrl, "/", sep=""))
}

submitJob <- function(clusterObject){
  # Add a streaming step (mapper.R over stream.txt) to the running job
  # flow and block until the step finishes.
  # Returns the final job-flow state (e.g. "WAITING" for an --alive
  # cluster) or "Job Flow Creation Failed" if the step was not accepted.
  jobFlowId       <- clusterObject$jobFlowId
  s3TempDir       <- clusterObject$s3TempDir
  s3TempDirOut    <- clusterObject$s3TempDirOut
  enableDebugging <- clusterObject$enableDebugging

  # EMR refuses to write to an existing output location.
  deleteS3Bucket(s3TempDirOut)

  emrCall <- paste("~/EMR/elastic-mapreduce  --stream ",
                     " --jobflow ", jobFlowId,
                     " --input s3n://", s3TempDir, "/stream.txt",
                     " --mapper s3n://", s3TempDir, "/mapper.R ",
                     " --reducer cat ",
                     " --output s3n://", s3TempDirOut, "/  ",
                     " --cache s3n://", s3TempDir, "/emrData.RData#emrData.RData",
                     if (isTRUE(enableDebugging)) {" --enable-debugging "},
                   sep="")

  emrCallReturn <- system(emrCall, intern=TRUE)
  message(emrCallReturn)
  # On success the CLI prints "Added steps to ...".
  if (substr(emrCallReturn, 1, 14) != "Added steps to"){
    message(paste("The job did not submit properly. The command line was ", emrCall, sep=""))
    # (the original had an unreachable stop() after this return)
    return("Job Flow Creation Failed")
  }
  Sys.sleep(15)
  if (isTRUE(enableDebugging)) {
    Sys.sleep(45)  # debugging has to be set up on each job so it takes a bit
  }

  # Poll once per iteration until the step reaches a settled state
  # (the original shelled out to checkStatus up to three times per loop).
  settledStates <- c("COMPLETED", "FAILED", "TERMINATED", "WAITING", "CANCELLED")
  repeat {
    state <- checkStatus(jobFlowId)$ExecutionStatusDetail$State
    if (state %in% settledStates) {
      return(state)
    }
    message(paste(state, " - ", Sys.time(), sep="" ))
    Sys.sleep(10)
  }
}

emrlapply <- function(X, FUN, clusterObject, ... ) {
  # Parallel lapply() on an EMR cluster: serialize X, FUN, and the extra
  # arguments; ship them to S3; run mapper.R over the elements of X as a
  # Hadoop streaming job; then download and reassemble the results into a
  # list in the original order.  If the job does not end in COMPLETED or
  # WAITING this falls through and returns NULL.
 
  #set up a local temp directory
  myTempDir <- clusterObject$localTempDir

  #the function to apply gets put into myFunction
  myFun <- FUN
  # capture ... unevaluated so the argument expressions can be serialized
  # and re-evaluated on the workers
  funArgs <-  as.list(substitute(list(...)))[-1L]

  cranPackages <- clusterObject$cranPackages 
  
  #save the objects
  objectsFileName <-paste(myTempDir ,"/emrData.RData", sep="") 
  save(cranPackages,
       myFun,
       funArgs,  
       file = objectsFileName,
       compress="xz")

  #delete the contents of the s3TempDir and s3TempDirOut
  
  s3TempDir <- clusterObject$s3TempDir
  system(paste("s3cmd del --force s3://", s3TempDir,  "/*", sep=""))
  system(paste("s3cmd mb  s3://", s3TempDir,  "/", sep=""))
  
  # the output bucket must not exist when the streaming job runs
  s3TempDirOut <- clusterObject$s3TempDirOut
  system(paste("s3cmd del --force s3://", s3TempDirOut,  "/*", sep=""))
  system(paste("s3cmd rb s3://", s3TempDirOut,  "/", sep=""))
  
  #upload the datafile to S3
  system(paste("s3cmd put ", objectsFileName , " s3://", s3TempDir, 
              "/emrData.RData" , sep=""))
  
  #upload the mapper to S3
  #needs to be altered for a package
  # NOTE(review): mapper.R path is hard coded to the author's machine
  system(paste("s3cmd put /home/jal/Documents/R/EMRExample/emrPackage/mapper.R s3://",
               s3TempDir,  "/mapper.R" , sep=""))
  
  #serialize the X list to a temp file
  streamFile <- paste(myTempDir, "/stream.txt", sep="")
  listToCsv(X, streamFile)
  
  #now upload stream.txt to EMR
  system(paste("s3cmd put ", streamFile , " s3://", s3TempDir, 
               "/stream.txt" , sep=""))
  
  # submitJob() blocks until the step finishes and returns the final state
  finalStatus <- submitJob(clusterObject) 
  myTempDirOut <- clusterObject$localTempDirOut
  
  if (finalStatus %in% c("COMPLETED", "WAITING")) {
    # pull the part-* result files from S3 into the local out directory
    system(paste("mkdir ", myTempDirOut, sep="" ))
    system(paste("rm ", myTempDirOut, "/*", sep=""))
    system(paste("s3cmd get  s3://", s3TempDirOut, 
              "/* ", myTempDirOut, "/", sep=""))

    #open files
    returnedFiles <- list.files(path=myTempDirOut, pattern="part")
    #yes, I read all the results into R then write them out to a text file
    #There was a reason for doing this, but I don't remember it
    #this could all be done in one step
    combinedOutputFile <- file(paste(myTempDirOut, "/combinedOutput.csv", sep=""), "w")
    unparsedOutput <- NULL
    for (file in returnedFiles){
        # only lines tagged "<result>," by the mapper carry real results;
        # everything else (Hadoop noise) is dropped
        lines <- readLines(paste(myTempDirOut, "/", file, sep="")) 
        for (line in lines) {
          if (substr(line, 1, 9) == "<result>,") {
            write(substr(line, 10, nchar(line)), file=combinedOutputFile)
          }
        }
    }
    close(combinedOutputFile)
    
    require(caTools)
    lines <- strsplit(readLines(paste(myTempDirOut, "/combinedOutput.csv", sep="")),
                      split=",")
    output <- NULL
    
    # each line is "<original index>,<base64 payload>"; assigning by index
    # restores the original ordering of X regardless of part-file order
    for (i in 1:length(lines)){
      output[[as.numeric(lines[[i]][[1]])]] <- (unserialize(base64decode(substr(lines[[i]][[2]],
                                                                               1, nchar(lines[[i]][[2]])-1),
                                                                        "raw")))
    }
    return(as.list(output))
  }
}

#test case: compare a local lapply() against the same job run on EMR
# Build 10 vectors of 999 normals plus one NA (the NA exercises the
# na.rm pass-through of emrlapply's ... argument).
myList <- vector("list", 10)
set.seed(1)
for (i in 1:10){
  myList[[i]] <- c(rnorm(999), NA)
}

# Local reference result, kept as a list to match what emrlapply returns.
# (The original first computed unlist(lapply(...)) and then immediately
# overwrote it with the list version; the dead assignment is removed.)
outputLocal <- lapply(myList, mean, na.rm=T)

myCluster   <- createCluster(numInstances=2, bootStrapLatestR=T, cranPackages=c("Hmisc", "plyr"))

outputEmr   <- emrlapply(myList, mean, myCluster, na.rm=T)

# TRUE if the round trip through EMR reproduced the local results
all.equal(outputEmr, outputLocal)

terminateCluster(myCluster)

#TODO:
#  Need to clean up temp directory after running jobs and also after closing cluster
#  What about conflicts with local or remote directory?
#  

