package com.workday.gridsimulator

import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Props
import akka.pattern.ask
import akka.event.Logging
import akka.util.duration._
import akka.util.Timeout

/**
 * The grid actor.
 *
 * Responsible for allocating the work that is submitted to the grid by the WorkScheduler actor.
 *
 * Also responsible for the creation of workers/slaves which it allocates the work to.
 *
 * Keeps allocating until all work submitted to the grid is completed. The grid then tells all
 * workers there is no more work and then lets the Clock know it is finished.
 *
 * Other actors can register with the grid (RegMeForUpdates) in order to receive SimulationStatus
 * updates while work is being completed, and a SimFinished message at the end.
 *
 * @param clock the clock driving the simulation
 *
 * @param workAllocator this is used by the grid for deciding what order work is allocated in
 *
 * @param workerCreator the grid delegates the creation of workers/slaves to this
 *
 */
class Grid(val clock: ActorRef, val workAllocator: WorkAllocator, workerCreator: WorkerCreation) extends Actor {

  private val log = Logging(context.system.eventStream, self.path.toString())
  implicit val timeout = Timeout(3 seconds)

  // Actor to notify with SimulationStatus / SimFinished messages, if anyone registered.
  private var updateListenerActor: ActorRef = _
  // NOTE(review): stored from RegMeForUpdates but never read — the update interval is
  // currently hard-coded to 250 in the WorkCompleted handler. Confirm before wiring it in.
  private var updateAfterThisManyCompletions: Int = 0
  private var noMoreWork: Boolean = false
  private var piecesOfWorkSubmitted: Int = 0
  private var piecesOfWorkCompleted: Int = 0
  private var freeWorkers: List[Worker] = List()
  private var allWorkers: List[Worker] = List()

  private var createWorkerSize = 0
  private var tellWhenAllWorkersRegged: ActorRef = null
  private var nextScheduleTime = 0.0
  private var piecesOfWorkAllocated = 0
  // Simulation time at which the first worker of the current allocation round was assigned
  // (used by updateScheduleTime to predict when that worker frees up again).
  private var roundStartTime = 0.0
  private var lastWorkerCreatedId = 0
  private var workersEc2InstanceType: Ec2Instance = null

  private var simCost = 0.0
  private var totalWorkDuration = 0.0
  private var workerCostReceivedCount = 0
  private var maxWorkerCountReached = 0

  private var workScheduler: ActorRef = _
  private var workSchedulerWaiting = false

  private var workArrivalRate = 0.0
  private var workCompletionRate = 0
  private var workersRequired = 0
  private var gridIsAutoSizing = false
  private var autoSizeComplete = false

  // Round-robin pointer: the worker the grid expects to hand the next piece of work to.
  private var nextWorkerToAssignTo: Worker = _

  override def receive = {
    case "Prepare" =>
      prepareToStart

    case "AutoSize" =>
      gridIsAutoSizing = true

    case CreateWorkers(count, ec2InstanceType) =>
      createWorkerSize = count
      tellWhenAllWorkersRegged = sender
      workerCreator.createWorkers(0, count, ec2InstanceType, clock, self, context.system)
      lastWorkerCreatedId = count
      if (ec2InstanceType != null) workersEc2InstanceType = ec2InstanceType

    case WorkerRegistration(worker) =>
      log.info(worker.id + " wants to register")

      if (!allWorkers.contains(worker) && !freeWorkers.contains(worker)) {
        log.info("worker " + worker + " has registered")
        println("worker " + worker.id + " has registered")
        allWorkers = worker :: allWorkers
        allWorkers = allWorkers.sortBy(_.id)
        // BUGFIX: sortBy returns a new list; the old code discarded the sorted result,
        // leaving freeWorkers unsorted. Keep it in id order to match allWorkers.
        freeWorkers = (freeWorkers ::: List(worker)).sortBy(_.id)

        for (worker <- allWorkers) println(worker.id)

        maxWorkerCountReached += 1
        // Round-robin allocation starts with the first worker that ever registered.
        if (maxWorkerCountReached == 1) nextWorkerToAssignTo = worker

        if (createWorkerSize == allWorkers.size && tellWhenAllWorkersRegged != null) {
          log.info("all workers registered")
          tellWhenAllWorkersRegged ! "WorkersRegistered"
        }

        if (workersRequired == allWorkers.size) {
          autoSizeComplete = true
          println("finished autosizing")
        }
      }

    case WorkSubmission(currentTime, work) =>
      log.info("work " + work.id + " submitted to static grid at " + currentTime)
      piecesOfWorkSubmitted += 1
      // The very first submission seeds the schedule clock.
      if (piecesOfWorkSubmitted == 1) nextScheduleTime = currentTime
      workSubmitted(currentTime, work)
      workScheduler = sender

      if (gridIsAutoSizing) autoSize(work)

    case WorkCompleted(currentTime, work, worker) =>
      log.info("work id " + work.id + " completed at " + currentTime)
      piecesOfWorkCompleted += 1
      if (updateListenerActor != null) {
        // Throttle status updates: roughly one every 250 completions (we dont want to smash
        // the clients browsers with updates!), plus a final one when everything completed.
        if (piecesOfWorkCompleted % 250 == 1 || piecesOfWorkCompleted == piecesOfWorkSubmitted)
          updateListenerActor ! SimulationStatus(piecesOfWorkSubmitted, piecesOfWorkCompleted, TimeHelper.formatTime(currentTime), allWorkers.size)
      }
      accumulateWork(work)
      workCompleted(currentTime, work, worker)

      // Flow control: wake the scheduler back up once the backlog has drained enough.
      if (workSchedulerWaiting && piecesOfWorkSubmitted - piecesOfWorkCompleted <= 250) {
        workScheduler ! "SendMeMoreWork"
        workSchedulerWaiting = false
      }

    case WorkArrivalFinished(currentTime) =>
      log.info("workarrival finished")
      noMoreWork = true

    case Stop(currentTime: Double) =>
      printStats
      shutDownWorkers
      if (noMoreWork && freeWorkers.size == allWorkers.size && piecesOfWorkSubmitted == piecesOfWorkCompleted && piecesOfWorkCompleted == piecesOfWorkAllocated) {
        log.info("sending sim result to listenerActor")
        if (updateListenerActor != null) {
          updateListenerActor ! SimFinished("Simulation finished successfully!", printResult(currentTime, simCost))
        }
      } else {
        log.info("sending sim result to listenerActor")
        if (updateListenerActor != null) updateListenerActor ! SimFinished("Simulation terminated early.", "unable to calculate cost for terminated simulation")
      }

    case WorkerCost(workerCost, currentTime) =>
      accumulateCost(workerCost)
      workerCostReceivedCount += 1
      // Once every worker has reported its cost we can finish up.
      if (workerCostReceivedCount == allWorkers.size) {
        clock ! Finished(self)
        self ! Stop(currentTime)
      }

    case FreeWorkerCount =>
      sender ! freeWorkers.size

    case AllWorkerCount =>
      log.info("asked for awc")
      sender ! allWorkers.size

    case WorkSubmittedCount =>
      sender ! piecesOfWorkSubmitted

    case WorkCompletedCount =>
      sender ! piecesOfWorkCompleted

    case WorkAllocatedCount =>
      sender ! piecesOfWorkAllocated

    case NextScheduleTime =>
      sender ! nextScheduleTime

    case RegMeForUpdates(actor, updateAfterThisManyCompletions) =>
      log.info("registered " + actor + " for updates ")
      updateListenerActor = actor
      this.updateAfterThisManyCompletions = updateAfterThisManyCompletions

    case UserSaidStop =>
      log.info("told by the user to stop simulation")
      // NOTE(review): this sends the Stop companion object, not a Stop(time) instance —
      // confirm the clock handles the bare message.
      clock ! Stop

    case UserSaidAddWorker =>
      log.info("user said to add a worker")
      workerCreator.createWorkers(lastWorkerCreatedId, 1, workersEc2InstanceType, clock, self, context.system)
      lastWorkerCreatedId += 1

    case UserSaidRemoveWorker =>
      log.info("user said to remove a worker")
      freeWorkers = freeWorkers.dropRight(1)
      allWorkers = allWorkers.dropRight(1)

    case UserSaidIncrementArrivalRate =>
      workScheduler ! UserSaidIncrementArrivalRate

    case UserSaidDecrementArrivalRate =>
      workScheduler ! UserSaidDecrementArrivalRate

    case "TellMeWhenToSubmitMoreWork" =>
      println("SCHEDULER IS WAITING ON ME")
      workSchedulerWaiting = true

    case "CostOfSimulation" =>
      sender ! simCost

    case _ =>
      log.warning("unexpected message received from " + sender)
  }

  /**
   * Updates the works start time,
   * adds the work to the workAllocator
   * and if any workers are available, tries to assign some work.
   *
   * While the grid is auto-sizing, allocation is paused until the resize has completed.
   *
   * @param currentTime the currentTime
   *
   * @param work the piece of work to add to the workAllocator
   */
  def workSubmitted(currentTime: Double, work: Work) = {
    work.startTime = currentTime
    if (workAllocator != null) workAllocator += work
    if (freeWorkers.size != 0) {
      if (gridIsAutoSizing) {
        if (autoSizeComplete) {
          println("WS ASC")
          val worker = freeWorkers.head
          freeWorkers = freeWorkers.tail
          assignWork(nextScheduleTime, worker)
        }
      } else {
        val worker = freeWorkers.head
        freeWorkers = freeWorkers.tail
        assignWork(nextScheduleTime, worker)
      }
    }
  }

  /**
   * Tries to assign work if possible, preserving a strict round-robin order over worker ids.
   *
   * If the offered worker is not the next one in the rotation (messages can arrive out of
   * order), it is put back on the free list and the expected worker is tried instead if free.
   *
   * If all work is completed, tell the workers there is no more work and tell ourself to stop.
   */
  def assignWork(currentTime: Double, worker: Worker): Unit = {

    if (workAllocator != null) {
      if (workAllocator.isEmpty) {
        if (noMoreWork && freeWorkers.size == allWorkers.size && piecesOfWorkSubmitted == piecesOfWorkCompleted && piecesOfWorkCompleted == piecesOfWorkAllocated) {
          for (worker <- allWorkers) worker.actorRef ! NoMoreWork(currentTime)
          log.info("all work complete, notifying clock that grid is finished")
          self ! Stop(currentTime)
        }
      } //more work to do
      else {

        if (nextWorkerToAssignTo.id == worker.id) {

          val work = workAllocator.getWorkItem

          //update the work execution time based on the ec2 instance type it wil be executed on
          if (workersEc2InstanceType != null) work.execTime = workersEc2InstanceType.executionTimeModifier(work.execTime)
          log.info("allocated " + worker.id + " work id " + work.id + " at " + currentTime)
          println("allocated " + worker.id + " work id " + work.id + " at " + currentTime)
          worker.actorRef ! AllocatedWork(currentTime, work)
          piecesOfWorkAllocated += 1
          updateScheduleTime(currentTime, work)

          val idOfWorkerJustAssignedTo = worker.id

          val idOfNextWorker = idOfWorkerJustAssignedTo + 1

          // Advance the round-robin pointer; when no worker has the next id we just
          // assigned to the last worker in allWorkers, so wrap back to the head.
          nextWorkerToAssignTo = allWorkers.find(_.id == idOfNextWorker).getOrElse(allWorkers.head)

          println("ASSIGNED TO " + worker.id + ", ASSIGNING NEXT TO " + nextWorkerToAssignTo.id)

        } //asynchronous nature - cant rely on ordering of messages
        //we received a work completed message from a worker that should "finish later in time" than the worker we expect to finish first
        else {
          println("CANT ASSIGN TO " + worker.id + " AS IT IS NOT THE NEXT SCHEDULED WORKER WHICH IS " + nextWorkerToAssignTo.id)
          println("adding back " + worker.id)
          freeWorkers = freeWorkers ::: List(worker)

          if (freeWorkers.contains(nextWorkerToAssignTo)) {
            println("nextWorker: " + nextWorkerToAssignTo.id + " is available, assigning at " + currentTime)

            assignWork(currentTime, nextWorkerToAssignTo)
          } else println("nextWorkerToAssignTo " + nextWorkerToAssignTo.id + " is not available as of " + currentTime)
        }
      }
    }

  }

  /**
   * Add back the worker that just completed some work (to the end of the free workers list).
   *
   * Try to assign more work if there is some, otherwise notify all workers there is no more.
   *
   * @param currentTime the work completion time
   *
   * @param work the piece of work completed
   *
   * @param worker the worker which completed the work
   */
  def workCompleted(currentTime: Double, work: Work, worker: Worker) = {

    if (worker != null) {
      if (!freeWorkers.contains(worker)) {
        freeWorkers = freeWorkers ::: List(worker)
        log.info("added back " + worker)
        println("ADDED BACK " + worker.actorRef.path + " fw size : " + freeWorkers.size)
      }
    }

    if (!workAllocator.isEmpty) {
      // Allocation is paused while an auto-resize is in flight.
      if (gridIsAutoSizing) {
        if (autoSizeComplete) {
          println("WC ASC")
          assignWork(nextScheduleTime, freeWorkers.head)
          freeWorkers = freeWorkers.tail
        }
      } else {
        assignWork(nextScheduleTime, freeWorkers.head)
        freeWorkers = freeWorkers.tail
      }

    } else if (noMoreWork && freeWorkers.size == allWorkers.size && piecesOfWorkSubmitted == piecesOfWorkCompleted && piecesOfWorkCompleted == piecesOfWorkAllocated) {
      log.info("all work complete, telling clock grid is finished")
      for (worker <- allWorkers) worker.actorRef ! NoMoreWork(currentTime)
    }

  }

  /**
   * Placeholder for stats printing.
   *
   * BUGFIX: previously this also sent Stop to every worker, duplicating the shutdown done by
   * shutDownWorkers (both run from the Stop handler), so each worker received Stop twice.
   * Shutting workers down is now left to shutDownWorkers alone.
   */
  def printStats = {
    println("to do - static grid, stats printing")
  }

  /**
   * Sends Stop to every worker and clears both worker lists.
   *
   * BUGFIX: the old implementation removed elements with the deprecated immutable List '-'
   * operator while iterating the same list; the messages sent and the final (empty) state
   * are unchanged.
   */
  def shutDownWorkers = {
    for (worker <- allWorkers) worker.actorRef ! Stop
    allWorkers = List()
    freeWorkers = List()
  }

  /**
   * Ensure any new simulation is not affected by previous ones.
   */
  def prepareToStart = {
    log.info("preparing to start")
    freeWorkers = List()
    allWorkers = List()
    piecesOfWorkCompleted = 0
    piecesOfWorkAllocated = 0
    piecesOfWorkSubmitted = 0
    nextScheduleTime = 0
    roundStartTime = 0
    noMoreWork = false
    workerCostReceivedCount = 0
    simCost = 0
    totalWorkDuration = 0.0
    maxWorkerCountReached = 0
    gridIsAutoSizing = false
    autoSizeComplete = false
    workSchedulerWaiting = false
    println
  }

  /**
   * Keeps the nextScheduleTime up-to-date after a piece of work was allocated.
   */
  def updateScheduleTime(currentTime: Double, work: Work) = {

    //only have 1 worker, so we can't allocate again to this worker until it finishes what it is working on
    if (allWorkers.size == 1) nextScheduleTime = currentTime + work.execTime

    //the grid just allocated to the first registered worker, need to remember at what time this was at
    //have more than 1 worker, can allocate again after the works arrival time
    else if (piecesOfWorkAllocated % allWorkers.size == 1) {

      roundStartTime = currentTime
      nextScheduleTime = currentTime + work.arrivalTime
    } //just allocated to the last registered worker
    //this means all workers were allocated work
    //so the next schedule time will be when the first worker finishes the work it was allocated
    //that time is when we allocated the work to it (roundStartTime) plus how long it takes to exec the work
    else if (piecesOfWorkAllocated % allWorkers.size == 0) {

      nextScheduleTime = roundStartTime + work.execTime

      //need this condition due to the asynchronous nature of the system.....
      //cant rely on the ordering of the messages we receive...
      //we might allocate work to some worker and then allocated some more work to a 2nd worker
      //we might get the 2nd workers completion message before the 1st..... etc.
      if (currentTime > nextScheduleTime) {
        nextScheduleTime = currentTime + work.arrivalTime
      }

    } //just allocated to a worker that is neither the first or last
    //so we can allocate again after the work arrival time
    else nextScheduleTime = currentTime + work.arrivalTime

  }

  /** Accumulates the cost reported by a single worker into the total simulation cost. */
  private def accumulateCost(workerCost: Double) = {
    simCost += workerCost
  }

  /** Accumulates the execution time of a completed piece of work into the running total. */
  private def accumulateWork(work: Work) = {
    totalWorkDuration += work.execTime
  }

  /**
   * Builds the human-readable result summary shown when the simulation finishes.
   *
   * @param finishTime the simulation finish time
   * @param simCost the accumulated cost of all workers
   */
  private def printResult(finishTime: Double, simCost: Double): String = {

    val result = maxWorkerCountReached + " ec2 instances required for " + TimeHelper.formatTime(finishTime) + " with a total cost of : $" + BigDecimal(simCost).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble + ". "

    if (maxWorkerCountReached != 1) {
      //completed * (totalDuration / completed) simplifies to the total duration; the old
      //form also produced NaN when nothing had completed yet.
      result + " In comparison, one worker would of taken time: " + TimeHelper.formatTime(totalWorkDuration)
    } else result
  }

  /**
   * Resizes the grid so that the number of workers matches the observed work arrival rate.
   *
   * Computes workers-per-minute arrival and completion rates from the submitted work item and
   * either creates or removes workers until allWorkers.size == workersRequired. Removal only
   * happens when every worker is free; allocation stays paused (autoSizeComplete == false)
   * until the target size is reached.
   */
  private def autoSize(work: Work) {
    var sizeDifference = 0
    workArrivalRate = 60 / work.arrivalTime //per minute
    workCompletionRate = (60.0 / workersEc2InstanceType.executionTimeModifier(work.execTime)).toInt //pieces Of Work completed per minute

    println("arrival rate: " + workArrivalRate + " per minute,  wcr: " + workCompletionRate)
    if (workCompletionRate != 0) {

      workersRequired = Math.round((workArrivalRate / workCompletionRate)).toInt
      println("WAR: " + workArrivalRate + ", WCR: " + workCompletionRate + ", PWC: " + piecesOfWorkCompleted)
      println("lwcid:" + lastWorkerCreatedId + ", required: " + workersRequired)
      println("POWA: " + piecesOfWorkAllocated + ", POWC: " + piecesOfWorkCompleted + ", POWS: " + piecesOfWorkSubmitted)

      if (allWorkers.size != workersRequired && lastWorkerCreatedId != workersRequired) {

        autoSizeComplete = false

        if (allWorkers.size > workersRequired) {
          sizeDifference = allWorkers.size - workersRequired
          println("GRID NEEDS TO AUTOSIZE - AWS: " + allWorkers.size + ", required: " + workersRequired + ", removing: " + sizeDifference + " workers")

          //only shrink when no worker is busy, otherwise we might drop a worker mid-job
          if (freeWorkers.size == allWorkers.size) {
            allWorkers = allWorkers.dropRight(sizeDifference)
            freeWorkers = allWorkers
            autoSizeComplete = true
          }

        } else if (workersRequired > allWorkers.size) {
          sizeDifference = workersRequired - allWorkers.size
          println("GRID NEEDS TO AUTOSIZE - AWS: " + allWorkers.size + ", required: " + workersRequired + ", adding: " + sizeDifference + " workers")

          workerCreator.createWorkers(lastWorkerCreatedId, sizeDifference, workersEc2InstanceType, clock, self, context.system)
          lastWorkerCreatedId += sizeDifference
        }
      } else {
        println("aws: " + allWorkers.size + ", workersRequired: " + workersRequired + ",  lastWorkerCreatedId: " + lastWorkerCreatedId)
        autoSizeComplete = true
      }
    } //multiple jobs arriving per minute
    //but a job takes longer than a minute to complete
    //so just autosize such that when a job arrives we have a worker that can begin on it straight away
    else {

      workersRequired = workArrivalRate.toInt
      println("WR: " + workersRequired)

      if (allWorkers.size != workersRequired && lastWorkerCreatedId != workersRequired) {

        println("workers cant complete a job in 60 seconds")
        println("grid will autosize so jobs are allocated asap")

        if (allWorkers.size > workersRequired) {
          sizeDifference = allWorkers.size - workersRequired
          println("GRID NEEDS TO AUTOSIZE - AWS: " + allWorkers.size + ", required: " + workersRequired + ", removing: " + sizeDifference + " workers")

          //only shrink when no worker is busy, otherwise we might drop a worker mid-job
          if (freeWorkers.size == allWorkers.size) {
            allWorkers = allWorkers.dropRight(sizeDifference)
            freeWorkers = allWorkers
            autoSizeComplete = true
          }
        } else if (workersRequired > allWorkers.size) {
          sizeDifference = workersRequired - allWorkers.size
          println("GRID NEEDS TO AUTOSIZE - AWS: " + allWorkers.size + ", required: " + workersRequired + ", adding: " + sizeDifference + " workers")
          workerCreator.createWorkers(lastWorkerCreatedId, sizeDifference, workersEc2InstanceType, clock, self, context.system)
          lastWorkerCreatedId += sizeDifference
        }
      }
    }

  }
}


