package org.hong.monkey.scheduler.cluster

import scala.util.control.NonFatal

import org.apache.hadoop.fs.{FileSystem, Path}

import org.hong.monkey.{Logging, MonkeyContext, MonkeyEnv}
import org.hong.monkey.deploy.MonkeyHadoopUtil
import org.hong.monkey.rpc.RpcAddress
import org.hong.monkey.scheduler.TaskSchedulerImpl

/**
 * Scheduler backend for SIMR-style deployments: publishes the driver's RPC URL and the
 * per-executor core limit to an HDFS file so that executors launched out-of-band can
 * discover and connect back to the driver.
 *
 * @param scheduler      the task scheduler this backend serves
 * @param sc             the active context supplying the conf and RPC environment
 * @param driverFilePath HDFS path at which the driver coordinates are published
 */
private[monkey] class SimrSchedulerBackend(
    scheduler: TaskSchedulerImpl,
    sc: MonkeyContext,
    driverFilePath: String)
  extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv)
  with Logging {

  // Write to a "_tmp" sibling first, then rename into place, so readers never
  // observe a partially written driver file.
  val tmpPath = new Path(driverFilePath + "_tmp")
  val filePath = new Path(driverFilePath)

  // Core limit advertised to executors through the driver file.
  val maxCores = conf.getInt("monkey.simr.executor.cores", 1)

  override def start() {
    super.start()

    val driverUrl = rpcEnv.uriOf(MonkeyEnv.driverActorSystemName,
      RpcAddress(sc.conf.get("monkey.driver.host"), sc.conf.get("monkey.driver.port").toInt),
      CoarseGrainedSchedulerBackend.ENDPOINT_NAME)

    val conf = MonkeyHadoopUtil.get.newConfiguration(sc.conf)
    val fs = FileSystem.get(conf)

    // TODO: ui
//    val appUIAddress = sc.ui.map(_.appUIAddress).getOrElse("")

    logInfo(s"Writing to HDFS file: $driverFilePath")
    logInfo(s"Writing Akka address: $driverUrl")
//    logInfo("Writing Monkey UI Address: " + appUIAddress)

    // Create temporary file to prevent race condition where executors get empty driverUrl file
    val temp = fs.create(tmpPath, true)
    try {
      temp.writeUTF(driverUrl)
      temp.writeInt(maxCores)
//      temp.writeUTF(appUIAddress)
    } finally {
      // Always close, even if a write fails, so the stream and its HDFS lease
      // on the tmp file are not leaked.
      temp.close()
    }

    // "Atomic" rename. HDFS signals rename failure through the boolean return
    // value rather than an exception, so it must be checked explicitly.
    if (!fs.rename(tmpPath, filePath)) {
      logWarning(s"error renaming $tmpPath to $filePath")
    }
  }

  override def stop() {
    // Cleanup of the published driver file is best-effort: an HDFS failure here
    // must never prevent the backend itself from shutting down.
    try {
      val conf = MonkeyHadoopUtil.get.newConfiguration(sc.conf)
      val fs = FileSystem.get(conf)
      if (!fs.delete(new Path(driverFilePath), false)) {
        logWarning(s"error deleting ${driverFilePath}")
      }
    } catch {
      case NonFatal(e) => logWarning(s"error deleting ${driverFilePath}", e)
    }
    super.stop()
  }

}
