/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.scheduler

import scala.collection.mutable.HashSet

import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.util.CallSite

/**
  * A stage is a set of parallel tasks all computing the same function that need to run as part
  * of a Spark job, where all the tasks have the same shuffle dependencies. Each DAG of tasks run
  * by the scheduler is split up into stages at the boundaries where shuffle occurs, and then the
  * DAGScheduler runs these stages in topological order.
  *
  * Each Stage can either be a shuffle map stage, in which case its tasks' results are input for
  * other stage(s), or a result stage, in which case its tasks directly compute a Spark action
  * (e.g. count(), save(), etc) by running a function on an RDD. For shuffle map stages, we also
  * track the nodes that each output partition is on.
  *
  * Each Stage also has a firstJobId, identifying the job that first submitted the stage.  When FIFO
  * scheduling is used, this allows Stages from earlier jobs to be computed first or recovered
  * faster on failure.
  * The callSite provides a location in user code which relates to the stage. For shuffle map
  * stages, the callSite gives the user code that created the RDD being shuffled. For result
  * stages, the callSite gives the user code that executes the associated action (e.g. count()).
  * Finally, a single stage can be re-executed in multiple attempts due to fault recovery. In that
  * case, the Stage object will track multiple StageInfo objects to pass to listeners or the web UI.
  * The latest one will be accessible through latestInfo.
  *
  * @param id         Unique stage ID
  * @param rdd        RDD that this stage runs on: for a shuffle map stage, it's the RDD we run map tasks
  *                   on, while for a result stage, it's the target RDD that we ran an action on
  * @param numTasks   Total number of tasks in stage; result stages in particular may not need to
  *                   compute all partitions, e.g. for first(), lookup(), and take().
  * @param parents    List of stages that this stage depends on (through shuffle dependencies).
  * @param firstJobId ID of the first job this stage was part of, for FIFO scheduling.
  * @param callSite   Location in the user program associated with this stage: either where the target
  *                   RDD was created, for a shuffle map stage, or where the action for a result stage was called.
  **/
private[scheduler] abstract class Stage(
    val id: Int,
    val rdd: RDD[_],
    val numTasks: Int,
    val parents: List[Stage],
    val firstJobId: Int,
    val callSite: CallSite)
  extends Logging {

  /** Number of partitions of the RDD this stage runs on. */
  val numPartitions = rdd.partitions.length

  /** Set of jobs that this stage belongs to. */
  val jobIds = new HashSet[Int]

  // Partitions for which tasks have been submitted but have not yet completed.
  // NOTE(review): maintained by the DAGScheduler, not by this class — confirm against callers.
  val pendingPartitions = new HashSet[Int]

  /** The ID to use for the next new attempt for this stage. */
  private var nextAttemptId: Int = 0

  // Short and long forms of the user call site, used for display and logging.
  val name: String = callSite.shortForm
  val details: String = callSite.longForm

  /**
    * Pointer to the [[StageInfo]] object for the most recent attempt. This needs to be initialized
    * here, before any attempts have actually been created, because the DAGScheduler uses this
    * StageInfo to tell SparkListeners when a job starts (which happens before any stage attempts
    * have been created).
    */
  private var _latestInfo: StageInfo = StageInfo.fromStage(this, nextAttemptId)

  /**
    * Set of stage attempt IDs that have failed with a FetchFailure. We keep track of these
    * failures in order to avoid endless retries if a stage keeps failing with a FetchFailure.
    * We keep track of each attempt ID that has failed to avoid recording duplicate failures if
    * multiple tasks from the same stage attempt fail (SPARK-5945).
    */
  private val fetchFailedAttemptIds = new HashSet[Int]

  /** Forgets all recorded fetch-failed attempts, resetting the consecutive-failure count. */
  private[scheduler] def clearFailures(): Unit = {
    fetchFailedAttemptIds.clear()
  }

  /**
    * Check whether we should abort the failedStage due to multiple consecutive fetch failures.
    *
    * This method updates the running set of failed stage attempts and returns
    * true if the number of failures exceeds the allowable number of failures.
    *
    * @param stageAttemptId attempt that just failed with a FetchFailure; recorded (at most once,
    *                       since the set deduplicates) before the threshold is checked
    */
  private[scheduler] def failedOnFetchAndShouldAbort(stageAttemptId: Int): Boolean = {
    fetchFailedAttemptIds.add(stageAttemptId)
    fetchFailedAttemptIds.size >= Stage.MAX_CONSECUTIVE_FETCH_FAILURES
  }

  /**
    * Creates a new attempt for this stage by creating a new StageInfo with a new attempt ID.
    *
    * @param numPartitionsToCompute  number of partitions this attempt will actually compute
    *                                (may be fewer than numPartitions, e.g. on retry)
    * @param taskLocalityPreferences preferred locations per task, if known
    */
  def makeNewStageAttempt(
      numPartitionsToCompute: Int,
      taskLocalityPreferences: Seq[Seq[TaskLocation]] = Seq.empty): Unit = {
    val metrics = new TaskMetrics
    metrics.register(rdd.sparkContext)
    _latestInfo = StageInfo.fromStage(
      this, nextAttemptId, Some(numPartitionsToCompute), metrics, taskLocalityPreferences)
    nextAttemptId += 1
  }

  /** Returns the StageInfo for the most recent attempt for this stage. */
  def latestInfo: StageInfo = _latestInfo

  // Stages are uniquely identified by their id, so equality and hashing delegate to it.
  override final def hashCode(): Int = id

  override final def equals(other: Any): Boolean = other match {
    // A typed pattern never matches null, so no explicit null check is needed here.
    case stage: Stage => stage.id == id
    case _ => false
  }

  /** Returns the sequence of partition ids that are missing (i.e. needs to be computed). */
  def findMissingPartitions(): Seq[Int]
}

private[scheduler] object Stage {
  /**
    * Number of consecutive stage attempts that may fail with a FetchFailure before the
    * stage is aborted instead of being resubmitted.
    */
  val MAX_CONSECUTIVE_FETCH_FAILURES = 4
}
