/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.scheduler.cluster.k8s

import io.fabric8.kubernetes.api.model.{DoneablePod, Pod}
import io.fabric8.kubernetes.client.KubernetesClient
import io.fabric8.kubernetes.client.dsl.PodResource
import org.mockito.{ArgumentCaptor, Mock, MockitoAnnotations}
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.{mock, never, times, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.BeforeAndAfter
import scala.collection.mutable

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.k8s.Config
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.Fabric8Aliases._
import org.apache.spark.deploy.k8s.KubernetesUtils._
import org.apache.spark.scheduler.ExecutorExited
import org.apache.spark.scheduler.cluster.k8s.ExecutorLifecycleTestUtils._

/**
 * Tests for [[ExecutorPodsLifecycleManager]]: verifies that executor pods reaching
 * terminal states are removed from the scheduler backend and deleted (or inactivated)
 * in Kubernetes, and that executors missing from the cluster longer than the configured
 * detect delta are marked as failed.
 */
class ExecutorPodsLifecycleManagerSuite extends SparkFunSuite with BeforeAndAfter {

  // One mock PodResource per pod name, created lazily by namedPodsAnswer(), so each
  // test can verify interactions (delete(), patch()) against the specific pod it used.
  private var namedExecutorPods: mutable.Map[String, PodResource[Pod, DoneablePod]] = _

  @Mock
  private var kubernetesClient: KubernetesClient = _

  @Mock
  private var podOperations: PODS = _

  @Mock
  private var schedulerBackend: KubernetesClusterSchedulerBackend = _

  private var snapshotsStore: DeterministicExecutorPodsSnapshotsStore = _
  private var eventHandlerUnderTest: ExecutorPodsLifecycleManager = _

  before {
    MockitoAnnotations.openMocks(this).close()
    snapshotsStore = new DeterministicExecutorPodsSnapshotsStore()
    namedExecutorPods = mutable.Map.empty[String, PodResource[Pod, DoneablePod]]
    when(schedulerBackend.getExecutorsWithRegistrationTs()).thenReturn(Map.empty[String, Long])
    when(kubernetesClient.pods()).thenReturn(podOperations)
    // withName(...) hands back a per-name mock so tests can verify per-pod calls.
    when(podOperations.withName(any(classOf[String]))).thenAnswer(namedPodsAnswer())
    eventHandlerUnderTest = new ExecutorPodsLifecycleManager(
      new SparkConf(),
      kubernetesClient,
      snapshotsStore)
    eventHandlerUnderTest.start(schedulerBackend)
  }

  test("When an executor reaches error states immediately, remove from the scheduler backend.") {
    val failedPod = failedExecutorWithoutDeletion(1)
    snapshotsStore.updatePod(failedPod)
    snapshotsStore.notifySubscribers()
    val msg = exitReasonMessage(1, failedPod)
    val expectedLossReason = ExecutorExited(1, exitCausedByApp = true, msg)
    verify(schedulerBackend).doRemoveExecutor("1", expectedLossReason)
    verify(namedExecutorPods(failedPod.getMetadata.getName)).delete()
  }

  test("Don't remove executors twice from Spark but remove from K8s repeatedly.") {
    val failedPod = failedExecutorWithoutDeletion(1)
    // The same failed pod is reported in two consecutive snapshots.
    snapshotsStore.updatePod(failedPod)
    snapshotsStore.notifySubscribers()
    snapshotsStore.updatePod(failedPod)
    snapshotsStore.notifySubscribers()
    val msg = exitReasonMessage(1, failedPod)
    val expectedLossReason = ExecutorExited(1, exitCausedByApp = true, msg)
    // Spark-side removal must be idempotent, but the K8s delete is re-attempted.
    verify(schedulerBackend, times(1)).doRemoveExecutor("1", expectedLossReason)
    verify(namedExecutorPods(failedPod.getMetadata.getName), times(2)).delete()
  }

  test("When the scheduler backend lists executor ids that aren't present in the cluster," +
    " remove those executors from Spark.") {
    when(schedulerBackend.getExecutorsWithRegistrationTs()).thenReturn(Map("1" -> 7L))
    val missingPodDelta =
      eventHandlerUnderTest.conf.get(Config.KUBERNETES_EXECUTOR_MISSING_POD_DETECT_DELTA)
    // Executor 1 registered at t=7 ms; at t = delta + 7 the elapsed time equals the
    // accepted delta exactly, so the missing pod must NOT be reported yet.
    snapshotsStore.clock.advance(missingPodDelta + 7)
    snapshotsStore.replaceSnapshot(Seq.empty[Pod])
    snapshotsStore.notifySubscribers()
    verify(schedulerBackend, never()).doRemoveExecutor(any(), any())

    // 1 more millisecond and the accepted delta is over so the missing POD will be detected
    snapshotsStore.clock.advance(1)
    snapshotsStore.replaceSnapshot(Seq.empty[Pod])
    snapshotsStore.notifySubscribers()
    val msg = "The executor with ID 1 (registered at 7 ms) was not found in the cluster at " +
      "the polling time (30008 ms) which is after the accepted detect delta time (30000 ms) " +
      "configured by `spark.kubernetes.executor.missingPodDetectDelta`. The executor may have " +
      "been deleted but the driver missed the deletion event. Marking this executor as failed."
    val expectedLossReason = ExecutorExited(-1, exitCausedByApp = false, msg)
    verify(schedulerBackend).doRemoveExecutor("1", expectedLossReason)
  }

  test("Keep executor pods in k8s if configured.") {
    val failedPod = failedExecutorWithoutDeletion(1)
    eventHandlerUnderTest.conf.set(Config.KUBERNETES_DELETE_EXECUTORS, false)
    snapshotsStore.updatePod(failedPod)
    snapshotsStore.notifySubscribers()
    val msg = exitReasonMessage(1, failedPod)
    val expectedLossReason = ExecutorExited(1, exitCausedByApp = true, msg)
    verify(schedulerBackend).doRemoveExecutor("1", expectedLossReason)
    // With deletion disabled the pod must be kept but marked inactive via a patch.
    verify(namedExecutorPods(failedPod.getMetadata.getName), never()).delete()

    val podCaptor = ArgumentCaptor.forClass(classOf[Pod])
    verify(namedExecutorPods(failedPod.getMetadata.getName)).patch(podCaptor.capture())

    val pod = podCaptor.getValue
    assert(pod.getMetadata.getLabels.get(SPARK_EXECUTOR_INACTIVE_LABEL) === "true")
  }

  /**
   * Builds the exit-reason message the lifecycle manager is expected to produce for a
   * failed pod, mirroring the production formatting (reason/message default to "N/A").
   */
  private def exitReasonMessage(failedExecutorId: Int, failedPod: Pod): String = {
    val reason = Option(failedPod.getStatus.getReason)
    val message = Option(failedPod.getStatus.getMessage)
    s"""
       |The executor with id $failedExecutorId exited with exit code 1.
       |The API gave the following brief reason: ${reason.getOrElse("N/A")}
       |The API gave the following message: ${message.getOrElse("N/A")}
       |The API gave the following container statuses:
       |
       |${containersDescription(failedPod)}
      """.stripMargin
  }

  /** Answer that returns (and caches) a dedicated mock PodResource per pod name. */
  private def namedPodsAnswer(): Answer[PodResource[Pod, DoneablePod]] =
    (invocation: InvocationOnMock) => {
      val podName: String = invocation.getArgument(0)
      namedExecutorPods.getOrElseUpdate(
        podName, mock(classOf[PodResource[Pod, DoneablePod]]))
    }
}
