package mesosphere.marathon

import java.util.concurrent.CountDownLatch
import java.util.{Timer, TimerTask}

import akka.Done
import akka.actor.{ActorRef, ActorSystem}
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import com.google.common.util.concurrent.AbstractExecutionThreadService
import com.typesafe.scalalogging.StrictLogging
import javax.inject.{Inject, Named}
import mesosphere.marathon.MarathonSchedulerActor._
import mesosphere.marathon.core.deployment.{DeploymentManager, DeploymentPlan, DeploymentStepInfo}
import mesosphere.marathon.core.election.{ElectionCandidate, ElectionService}
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.heartbeat._
import mesosphere.marathon.core.leadership.LeadershipCoordinator
import mesosphere.marathon.core.storage.store.PersistenceStore
import mesosphere.marathon.state.{AbsolutePathId, AppDefinition, Timestamp}
import mesosphere.marathon.storage.migration.Migration
import mesosphere.util.PromiseActor
import org.apache.mesos.SchedulerDriver

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Failure

/**
  * PrePostDriverCallback is implemented by callback receivers which have to listen for driver
  * start/stop events
  */
trait PrePostDriverCallback {

  /**
    * Will get called _before_ the driver is running, but after migration.
    *
    * @return a future that completes once the callback's preparation work is done;
    *         the caller awaits all callbacks before creating the driver.
    */
  def preDriverStarts: Future[Unit]

  /**
    * Will get called _after_ the driver terminated.
    *
    * @return a future that completes once the callback's cleanup work is done.
    */
  def postDriverTerminates: Future[Unit]
}

/**
  * DeploymentService provides methods to deploy plans.
  */
// TODO (AD): do we need this trait?
// TODO (AD): do we need this trait?
trait DeploymentService {

  /**
    * Deploy a plan.
    * @param plan the plan to deploy.
    * @param force only one deployment can be applied at a time per run spec. With this flag
    *              one can cancel a conflicting running deployment and start this new one.
    * @return a future that completes when the deployment has started,
    *         or a failed future if the deployment failed to start.
    */
  def deploy(plan: DeploymentPlan, force: Boolean = false): Future[Done]

  /** Lists all currently running deployments with their step progress. */
  def listRunningDeployments(): Future[Seq[DeploymentStepInfo]]
}

/**
  * Wrapper class for the scheduler
  */
class MarathonSchedulerService @Inject() (
    persistenceStore: PersistenceStore[_, _, _],
    leadershipCoordinator: LeadershipCoordinator,
    config: MarathonConf,
    electionService: ElectionService,
    prePostDriverCallbacks: Seq[PrePostDriverCallback],
    groupManager: GroupManager,
    driverFactory: SchedulerDriverFactory,
    system: ActorSystem,
    migration: Migration,
    deploymentManager: DeploymentManager,
    @Named("schedulerActor") schedulerActor: ActorRef,
    heartbeatMonitor: MesosHeartbeatMonitor
)(implicit mat: Materializer)
    extends AbstractExecutionThreadService
    with ElectionCandidate
    with DeploymentService
    with StrictLogging {

  import scala.concurrent.ExecutionContext.Implicits.global

  // Timeout used for the blocking Await calls against ZK-backed storage below.
  implicit val zkTimeout = config.zkTimeoutDuration

  // Keeps the Guava service thread in run() alive until triggerShutdown()
  // counts it down. Counted down exactly once.
  val isRunningLatch = new CountDownLatch(1)

  // Time to wait before trying to reconcile app tasks after driver starts
  val reconciliationInitialDelay =
    Duration(config.reconciliationInitialDelay(), MILLISECONDS)

  // Interval between task reconciliation operations
  val reconciliationInterval =
    Duration(config.reconciliationInterval(), MILLISECONDS)

  // Time to wait before trying to scale apps after driver starts
  val scaleAppsInitialDelay =
    Duration(config.scaleAppsInitialDelay(), MILLISECONDS)

  // Interval between attempts to scale apps
  val scaleAppsInterval =
    Duration(config.scaleAppsInterval(), MILLISECONDS)

  // Mutable because a cancelled java.util.Timer cannot be reused; a fresh
  // instance is installed on every loss of leadership (see stopLeadership).
  private[mesosphere] var timer = newTimer()

  // This is a little ugly as we are using a mutable variable. But drivers can't
  // be reused (i.e. once stopped they can't be started again. Thus,
  // we have to allocate a new driver before each run or after each stop.
  var driver: Option[SchedulerDriver] = None

  implicit val timeout: Timeout = 5.seconds

  // Factory method; overridable in tests to inject a fake timer.
  protected def newTimer() = new Timer("marathonSchedulerTimer")

  /**
    * Forwards the plan to the MarathonSchedulerActor and waits (asynchronously,
    * without timeout) for its reply.
    *
    * @param plan the plan to deploy.
    * @param force cancel a conflicting deployment instead of failing.
    * @return Done once the deployment has started; a failed future with the
    *         actor-reported cause if it could not start.
    */
  def deploy(plan: DeploymentPlan, force: Boolean = false): Future[Done] = {
    logger.debug(s"Forwarding new deployment plan with planId=${plan.id}, force=$force to the MarathonSchedulerActor")
    val future: Future[Any] = PromiseActor.askWithoutTimeout(system, schedulerActor, Deploy(plan, force))
    // NOTE(review): any reply other than DeploymentStarted/DeploymentFailed
    // would fail this future with a MatchError — presumably the actor only
    // ever answers with these two; verify against MarathonSchedulerActor.
    future.map {
      case DeploymentStarted(_) => Done
      case DeploymentFailed(_, t) => throw t
    }
  }

  /** Fire-and-forget cancellation of a running deployment. */
  def cancelDeployment(plan: DeploymentPlan): Unit =
    schedulerActor ! CancelDeployment(plan)

  /**
    * Synchronously lists all stored versions of the given app.
    * Blocks the calling thread for up to zkTimeoutDuration.
    */
  def listAppVersions(appId: AbsolutePathId): Seq[Timestamp] =
    Await.result(groupManager.appVersions(appId).map(Timestamp(_)).runWith(Sink.seq), config.zkTimeoutDuration)

  def listRunningDeployments(): Future[Seq[DeploymentStepInfo]] =
    deploymentManager.list()

  /**
    * Synchronously fetches a specific version of an app, if it exists.
    * Blocks the calling thread for up to zkTimeoutDuration.
    */
  def getApp(appId: AbsolutePathId, version: Timestamp): Option[AppDefinition] = {
    Await.result(groupManager.appVersion(appId, version.toOffsetDateTime), config.zkTimeoutDuration)
  }

  //Begin Service interface

  // Guava AbstractExecutionThreadService hook; invoked on the service thread
  // before run().
  override def startUp(): Unit = {
    logger.info("Starting up")
    super.startUp()
  }

  // Main body of the Guava service thread: offer leadership once, then park
  // until shutdown. All real work happens via the ElectionCandidate callbacks.
  override def run(): Unit = {
    logger.info("Beginning run")

    // The first thing we do is offer our leadership.
    electionService.offerLeadership(this)

    // Block on the latch which will be countdown only when shutdown has been
    // triggered. This is to prevent run()
    // from exiting.
    scala.concurrent.blocking {
      isRunningLatch.await()
    }

    logger.info("Completed run")
  }

  // Orderly shutdown: abdicate first so no new leadership work starts, then
  // stop the driver, cancel timers, and release the run() thread.
  override def triggerShutdown(): Unit =
    synchronized {
      logger.info("Shutdown triggered")

      electionService.abdicateLeadership()
      stopDriver()

      logger.info("Cancelling timer")
      timer.cancel()

      // The countdown latch blocks run() from exiting. Counting down the latch removes the block.
      logger.info("Removing the blocking of run()")
      isRunningLatch.countDown()

      super.triggerShutdown()
    }

  private[this] def stopDriver(): Unit =
    synchronized {
      // many are the assumptions concerning when this is invoked. see startLeadership, stopLeadership,
      // triggerShutdown.
      logger.info("Stopping driver")

      // Stopping the driver will cause the driver run() method to return.
      driver.foreach(_.stop(true)) // failover = true

      // signals that the driver was stopped manually (as opposed to crashing mid-process)
      driver = None
    }

  //End Service interface

  //Begin ElectionCandidate interface

  // Invoked by the election service once this instance has become leader.
  // Performs the full leader bring-up sequence: open storage -> sync ->
  // migrate -> pre-driver callbacks -> leadership actors -> driver start.
  // NOTE: blocks the caller with several Await.result calls while holding
  // this object's monitor.
  override def startLeadership(): Unit =
    synchronized {
      logger.info("As new leader running the driver")

      // allow interactions with the persistence store
      persistenceStore.markOpen()

      // Before reading to and writing from the storage, let's ensure that
      // no stale values are read from the persistence store.
      // Although in case of ZK it is done at the time of creation of CuratorZK,
      // it is better to be safe than sorry.
      Await.result(persistenceStore.sync(), Duration.Inf)

      refreshCachesAndDoMigration()

      // run all pre-driver callbacks
      logger.info(s"""Call preDriverStarts callbacks on ${prePostDriverCallbacks.mkString(", ")}""")
      Await.result(
        Future.sequence(prePostDriverCallbacks.map(_.preDriverStarts)),
        config.onElectedPrepareTimeout().millis
      )
      logger.info("Finished preDriverStarts callbacks")

      // start all leadership coordination actors
      Await.result(leadershipCoordinator.prepareForStart(), config.maxActorStartupTime().milliseconds)

      // create new driver
      driver = Some(driverFactory.createDriver())

      // start timers
      schedulePeriodicOperations()

      // We have to start the Heartbeat monitor even before we're successfully registered, since in rare occasions driver
      // can hang forever trying to connect to Mesos (or doing some other driver work). In this case we also want
      // to suicide after not receiving any messages for a while.
      driver.foreach(heartbeatMonitor.activate(_))

      // The following block asynchronously runs the driver. Note that driver.run()
      // blocks until the driver has been stopped (or aborted).
      Future {
        scala.concurrent.blocking {
          driver.foreach(_.run())
        }
      } onComplete { result =>
        synchronized {

          logger.info(s"Driver future completed with result=$result.")
          result match {
            case Failure(t) => logger.error("Exception while running driver", t)
            case _ =>
          }

          // ONLY do this if there's some sort of driver crash: avoid invoking abdication logic if
          // the driver was stopped via stopDriver. stopDriver only happens when
          //   1. we're being terminated (and have already abdicated)
          //   2. we've lost leadership (no need to abdicate if we've already lost)
          // (stopDriver sets `driver = None`, so a non-empty `driver` here means
          // the driver exited on its own — i.e. a crash.)
          driver.foreach { _ =>
            electionService.abdicateLeadership()
          }

          driver = None

          logger.info(s"Call postDriverRuns callbacks on ${prePostDriverCallbacks.mkString(", ")}")
          Await.result(Future.sequence(prePostDriverCallbacks.map(_.postDriverTerminates)), config.zkTimeoutDuration)
          logger.info("Finished postDriverRuns callbacks")
        }
      }
    }

  // Invalidates the group cache, runs storage migration (leader-only), then
  // reloads the cache so the migrated state is what subsequent reads see.
  private def refreshCachesAndDoMigration(): Unit = {
    // We might not need to invalidate the group cache before migration, but it doesn't hurt. After migration we
    // certainly want to make sure the migrated state is reloaded

    // refresh group repository cache
    Await.result(groupManager.invalidateGroupCache(), Duration.Inf)

    // execute tasks, only the leader is allowed to
    migration.migrate()

    // refresh group repository again - migration or restore might changed zk state, this needs to be re-loaded
    Await.result(groupManager.invalidateAndRefreshGroupCache(), Duration.Inf)
  }

  // Invoked by the election service upon loss of leadership. Tears down
  // leader-only machinery: storage access, coordination actors, timer, driver.
  override def stopLeadership(): Unit =
    synchronized {
      // invoked by election service upon loss of leadership (state transitioned to Idle)
      logger.info("Lost leadership")

      // disallow any interaction with the persistence storage
      persistenceStore.markClosed()

      leadershipCoordinator.stop()

      // Swap in a fresh timer before cancelling the old one: a cancelled
      // java.util.Timer rejects new tasks, and we may become leader again.
      val oldTimer = timer
      timer = newTimer()
      oldTimer.cancel()

      driver.foreach { driverInstance =>
        heartbeatMonitor.deactivate(driverInstance)
        // Our leadership has been defeated. Thus, stop the driver.
        stopDriver()
      }
    }

  //End ElectionDelegate interface

  // Registers the two recurring leader tasks (scale + reconcile) on the timer.
  // Each task re-checks leadership at fire time, since the timer may outlive
  // a leadership term briefly.
  private def schedulePeriodicOperations(): Unit =
    synchronized {
      timer.schedule(
        new TimerTask {
          def run(): Unit = {
            if (electionService.isLeader) {
              schedulerActor ! ScaleRunSpecs
            } else logger.info("Not leader therefore not scaling apps")
          }
        },
        scaleAppsInitialDelay.toMillis,
        scaleAppsInterval.toMillis
      )

      timer.schedule(
        new TimerTask {
          def run(): Unit = {
            if (electionService.isLeader) {
              schedulerActor ! ReconcileTasks
              schedulerActor ! ReconcileHealthChecks
            } else logger.info("Not leader therefore not reconciling tasks")
          }
        },
        reconciliationInitialDelay.toMillis,
        reconciliationInterval.toMillis
      )
    }
}
