/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
package kafka.controller

import kafka.api.LeaderAndIsr
import kafka.common.StateChangeFailedException
import kafka.server.KafkaConfig
import kafka.utils.Logging
import kafka.zk.{KafkaZkClient, TopicPartitionStateZNode}
import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.KeeperException.Code

import scala.collection.mutable

/**
 * This class represents the state machine for replicas. It defines the states that a replica can be in and
 * enforces that every state transition happens from a legal previous state. The controller can only start the
 * state machines after the controller context has been initialized, because the partition and replica state
 * machines need the full set of partitions and replicas in the cluster; and since a partition is made up of
 * replicas, all replicas must be initialized before the partition state machine can be started.
 *
 * The different states that a replica can be in are:
 * 1. NewReplica        : The controller can create new replicas during partition reassignment. In this state a
 *                        replica can only act as a follower; it can also be a transient state after replica
 *                        deletion. Valid previous state: NonExistentReplica.
 * 2. OnlineReplica     : Once a replica has been created and is part of the assigned replicas for its partition,
 *                        it is in this state. In this state it can act as either leader or follower.
 *                        Valid previous states: NewReplica, OnlineReplica, OfflineReplica or ReplicaDeletionIneligible.
 * 3. OfflineReplica    : If the broker hosting the replica dies, the replica moves to this state.
 *                        Valid previous states: NewReplica, OnlineReplica, OfflineReplica or ReplicaDeletionIneligible.
 * 4. ReplicaDeletionStarted: State a replica is moved to when its deletion starts.
 *                        Valid previous state: OfflineReplica.
 * 5. ReplicaDeletionSuccessful: A replica moves to this state when deletion completed without error, i.e. its
 *                        data has been removed from the broker. Valid previous state: ReplicaDeletionStarted.
 * 6. ReplicaDeletionIneligible: A replica moves to this state when its deletion fails.
 *                        Valid previous state: ReplicaDeletionStarted.
 * 7. NonExistentReplica: A replica that was deleted successfully is moved to this state.
 *                        Valid previous state: ReplicaDeletionSuccessful.
 */
class ReplicaStateMachine(config: KafkaConfig,
                          stateChangeLogger: StateChangeLogger,
                          controllerContext: ControllerContext,
                          topicDeletionManager: TopicDeletionManager,
                          zkClient: KafkaZkClient,
                          replicaState: mutable.Map[PartitionAndReplica, ReplicaState],
                          controllerBrokerRequestBatch: ControllerBrokerRequestBatch) extends Logging {
  // Id of the broker this controller runs on; used in log context and failure messages.
  private val controllerId = config.brokerId

  this.logIdent = s"[ReplicaStateMachine controllerId=$controllerId] "

  /**
   * Invoked on successful controller election (onControllerFailover() --> replicaStateMachine.startup()).
   * Initializes the in-memory state of every replica and then attempts to move all live replicas to
   * OnlineReplica.
   */
  def startup() {
    // Initialize the state of every replica: live replicas become OnlineReplica,
    // replicas on dead brokers become ReplicaDeletionIneligible.
    initializeReplicaState()
    handleStateChanges(controllerContext.allLiveReplicas().toSeq, OnlineReplica)
  }

  /**
   * Invoked on controller shutdown. Clears all in-memory replica state.
   */
  def shutdown() {
    replicaState.clear()
    info("Stopped replica state machine")
  }

  /**
   * Initializes the in-memory state of every replica of every partition known to the controller context.
   * Live replicas are marked OnlineReplica; replicas hosted on dead brokers are marked
   * ReplicaDeletionIneligible, since their data cannot be deleted while the hosting broker is down.
   */
  private def initializeReplicaState() {
    // Iterate over every partition in the cluster
    controllerContext.allPartitions.foreach { partition =>
      val replicas = controllerContext.partitionReplicaAssignment(partition)
      replicas.foreach { replicaId =>
        val partitionAndReplica = PartitionAndReplica(partition, replicaId)
        // A replica whose hosting broker is alive starts out Online
        if (controllerContext.isReplicaOnline(replicaId, partition))
          replicaState.put(partitionAndReplica, OnlineReplica)
        else
          // Replicas on dead brokers are considered ineligible for deletion
          replicaState.put(partitionAndReplica, ReplicaDeletionIneligible)
      }
    }
  }

  /**
   * Moves the given replicas to the target state. The resulting controller-to-broker requests are
   * accumulated in a batch and sent out together at the end.
   * @param replicas the replicas whose state should be changed
   * @param targetState the state the replicas should be moved to
   * @param callbacks callbacks invoked on broker responses (e.g. StopReplica responses during topic deletion)
   */
  def handleStateChanges(replicas: Seq[PartitionAndReplica], targetState: ReplicaState,
                         callbacks: Callbacks = new Callbacks()): Unit = {
    if (replicas.nonEmpty) {
      try {
        controllerBrokerRequestBatch.newBatch()
        // Process the transitions grouped per replica id (i.e. per broker).
        // NOTE(review): .map is used purely for its side effects here; foreach would express the intent better.
        replicas.groupBy(_.replica).map { case (replicaId, replicas) =>
          val partitions = replicas.map(_.topicPartition)
          doHandleStateChanges(replicaId, partitions, targetState, callbacks)
        }
        // Flush the accumulated requests to the brokers
        controllerBrokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch)
      } catch {
        case e: Throwable => error(s"Error while moving some replicas to $targetState state", e)
      }
    }
  }

  /**
   * This API exercises the replica's state machine. It ensures that every state transition happens from a legal
   * previous state to the target state. Valid state transitions are:
   * NonExistentReplica --> NewReplica
   * --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the
   *   partition to every live broker
   *
   * NewReplica -> OnlineReplica
   * --add the new replica to the assigned replica list if needed
   *
   * OnlineReplica,OfflineReplica -> OnlineReplica
   * --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the
   *   partition to every live broker
   *
   * NewReplica,OnlineReplica,OfflineReplica,ReplicaDeletionIneligible -> OfflineReplica
   * --send StopReplicaRequest to the replica (w/o deletion)
   * --remove this replica from the isr and send LeaderAndIsr request (with new isr) to the leader replica and
   *   UpdateMetadata request for the partition to every live broker.
   *
   * OfflineReplica -> ReplicaDeletionStarted
   * --send StopReplicaRequest to the replica (with deletion)
   *
   * ReplicaDeletionStarted -> ReplicaDeletionSuccessful
   * -- mark the state of the replica in the state machine
   *
   * ReplicaDeletionStarted -> ReplicaDeletionIneligible
   * -- mark the state of the replica in the state machine
   *
   * ReplicaDeletionSuccessful -> NonExistentReplica
   * -- remove the replica from the in memory partition replica assignment cache
   *
   * @param replicaId The replica for which the state transition is invoked
   * @param partitions The partitions on this replica for which the state transition is invoked
   * @param targetState The end state that the replica should be moved to
   */
  private def doHandleStateChanges(replicaId: Int, partitions: Seq[TopicPartition], targetState: ReplicaState,
                                   callbacks: Callbacks): Unit = {
    val replicas = partitions.map(partition => PartitionAndReplica(partition, replicaId))
    // Replicas never seen before default to NonExistentReplica
    replicas.foreach(replica => replicaState.getOrElseUpdate(replica, NonExistentReplica))
    // Only replicas whose current state is a valid previous state of targetState are processed
    val (validReplicas, invalidReplicas) = replicas.partition(replica => isValidTransition(replica, targetState))
    invalidReplicas.foreach(replica => logInvalidTransition(replica, targetState))
    targetState match {
      /**
       * NonExistentReplica --> NewReplica:
       * 1. Only replicas in NonExistentReplica state may move to NewReplica (validated above);
       * 2. Look up the partition's LeaderIsrAndControllerEpoch in the controller context;
       * 3. If it is absent (e.g. a newly created partition that has no LeaderAndIsr yet), just record the
       *    replica as NewReplica and finish;
       * 4. If it is present and the partition leader is this replica, fail the state change, because a
       *    replica in NewReplica state must not be elected leader;
       * 5. Otherwise queue a LeaderAndIsr request to this replica (which also queues an UpdateMetadata
       *    request for the partition to every live broker) and record the replica as NewReplica.
       */
      case NewReplica =>
        validReplicas.foreach { replica =>
          val partition = replica.topicPartition
          // Cached leader/ISR info for the partition, if any
          controllerContext.partitionLeadershipInfo.get(partition) match {
            case Some(leaderIsrAndControllerEpoch) =>
              if (leaderIsrAndControllerEpoch.leaderAndIsr.leader == replicaId) {
                // A replica in NewReplica state cannot act as the partition leader
                val exception = new StateChangeFailedException(s"Replica $replicaId for partition $partition cannot be moved to NewReplica state as it is being requested to become leader")
                // NOTE(review): the target state logged here is OfflineReplica although the attempted
                // transition was to NewReplica — looks like a slip in the log call; verify upstream.
                logFailedStateChange(replica, replicaState(replica), OfflineReplica, exception)
              } else {
                // Send a LeaderAndIsr request to this replica; this also queues an UpdateMetadata
                // request for the partition to all brokers
                controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(replicaId),
                                  replica.topicPartition,
                                  leaderIsrAndControllerEpoch,
                                  controllerContext.partitionReplicaAssignment(replica.topicPartition),
                                  isNew = true)
                logSuccessfulTransition(replicaId, partition, replicaState(replica), NewReplica)
                // Record the new state in the state machine
                replicaState.put(replica, NewReplica)
              }
            case None =>
              // No LeaderAndIsr info yet (leader election for the partition has not completed):
              // just record the new state
              logSuccessfulTransition(replicaId, partition, replicaState(replica), NewReplica)
              replicaState.put(replica, NewReplica)
          }
        }

        /**
         * OnlineReplica is the normal working state; the replica may be either leader or follower.
         * Replicas in NewReplica, OnlineReplica, OfflineReplica or ReplicaDeletionIneligible state may
         * move to OnlineReplica. Two cases are handled:
         * A. NewReplica --> OnlineReplica
         *    1) fetch the partition's replica assignment from the controller context;
         *    2) if this replica is not in the assignment, add it;
         *    3) record the replica as OnlineReplica.
         * B. OnlineReplica | OfflineReplica --> OnlineReplica
         *    1) fetch the partition's LeaderIsrAndControllerEpoch from the controller context;
         *    2) if present, queue a LeaderAndIsr request for the partition to this replica's broker;
         *    3) if absent, do nothing for the request batch;
         *    4) record the replica as OnlineReplica.
         */
      case OnlineReplica =>
        validReplicas.foreach { replica =>
          val partition = replica.topicPartition
          replicaState(replica) match {
            case NewReplica =>
              // NewReplica --> OnlineReplica
              val assignment = controllerContext.partitionReplicaAssignment(partition)
              // If the replica is somehow missing from the partition's assignment, add it
              // (should not happen in the normal flow)
              if (!assignment.contains(replicaId)) {
                controllerContext.updatePartitionReplicaAssignment(partition, assignment :+ replicaId)
              }
            case _ =>
              //  OnlineReplica | OfflineReplica --> OnlineReplica
              controllerContext.partitionLeadershipInfo.get(partition) match {
                case Some(leaderIsrAndControllerEpoch) =>
                  // LeaderIsrAndControllerEpoch info exists: queue the corresponding LeaderAndIsr request
                  controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(replicaId),
                              replica.topicPartition,
                              leaderIsrAndControllerEpoch,
                              controllerContext.partitionReplicaAssignment(partition),
                              isNew = false)
                case None =>
                  // The partition is not online yet (no leader elected, no log started on any broker);
                  // nothing to send
              }
          }
          logSuccessfulTransition(replicaId, partition, replicaState(replica), OnlineReplica)
          replicaState.put(replica, OnlineReplica)
        }

      /**
       * Transition to OfflineReplica:
       * 1) only replicas in NewReplica, OnlineReplica, OfflineReplica or ReplicaDeletionIneligible state
       *    may move here (validated above);
       * 2) queue a StopReplica request (deletePartition = false) to the replica's broker;
       * 3) split the replicas into those with and without cached leadership info;
       * 4) for replicas with leadership info, remove the replica from the partition's ISR in ZooKeeper and
       *    queue a LeaderAndIsr request (with the new ISR) to the partition's remaining replicas;
       * 5) replicas without leadership info only get the state recorded (UpdateMetadata requests are queued
       *    as part of the LeaderAndIsr path above for the others);
       * 6) record the replica as OfflineReplica.
       */
      case OfflineReplica =>
        validReplicas.foreach { replica =>
          // First stop replication by sending StopReplica (without deleting the data)
          controllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(replicaId), replica.topicPartition,
            deletePartition = false, (_, _) => ())
        }
        // Split into replicas with and without cached leadership info
        val (replicasWithLeadershipInfo, replicasWithoutLeadershipInfo) = validReplicas.partition { replica =>
          controllerContext.partitionLeadershipInfo.contains(replica.topicPartition)
        }
        // For partitions with leadership info, remove the replica from the ISR in ZooKeeper
        val updatedLeaderIsrAndControllerEpochs = removeReplicasFromIsr(replicaId, replicasWithLeadershipInfo.map(_.topicPartition))
        updatedLeaderIsrAndControllerEpochs.foreach { case (partition, leaderIsrAndControllerEpoch) =>
          if (!topicDeletionManager.isPartitionToBeDeleted(partition)) {
            val recipients = controllerContext.partitionReplicaAssignment(partition).filterNot(_ == replicaId)
            // Notify the partition's remaining replicas of the shrunk ISR
            controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(recipients,
                          partition,
                          leaderIsrAndControllerEpoch,
                          controllerContext.partitionReplicaAssignment(partition),
                          isNew = false)
          }
          val replica = PartitionAndReplica(partition, replicaId)
          logSuccessfulTransition(replicaId, partition, replicaState(replica), OfflineReplica)
          replicaState.put(replica, OfflineReplica)
        }
        // Replicas without leadership info: only record the state change
        replicasWithoutLeadershipInfo.foreach { replica =>
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), OfflineReplica)
          replicaState.put(replica, OfflineReplica)
        }

      /**
       * Starting state of the replica deletion process:
       * 1) only replicas in OfflineReplica state may move here (validated above);
       * 2) record the replica as ReplicaDeletionStarted;
       * 3) queue a StopReplica request with deletePartition = true; on receiving it the broker deletes
       *    the replica's data from disk.
       */
      case ReplicaDeletionStarted =>
        validReplicas.foreach { replica =>
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), ReplicaDeletionStarted)
          replicaState.put(replica, ReplicaDeletionStarted)
          // Ask the broker to stop the replica and delete its data
          controllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(replicaId),
                  replica.topicPartition,
                  deletePartition = true,
                  callbacks.stopReplicaResponseCallback)
        }

      /**
       * Replica deletion failed:
       * 1) only replicas in ReplicaDeletionStarted state may move here (validated above);
       * 2) record the replica as ReplicaDeletionIneligible.
       */
      case ReplicaDeletionIneligible =>
        validReplicas.foreach { replica =>
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), ReplicaDeletionIneligible)
          replicaState.put(replica, ReplicaDeletionIneligible)
        }

      /**
       * Replica deletion succeeded:
       * 1) only replicas in ReplicaDeletionStarted state may move here (validated above);
       * 2) record the replica as ReplicaDeletionSuccessful.
       */
      case ReplicaDeletionSuccessful =>
        validReplicas.foreach { replica =>
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), ReplicaDeletionSuccessful)
          replicaState.put(replica, ReplicaDeletionSuccessful)
        }

      /**
       * Replica is fully deleted and no longer exists:
       * 1) only replicas in ReplicaDeletionSuccessful state may move here (validated above);
       * 2) remove the replica from the partition's replica assignment in the controller context;
       * 3) remove the replica from the state machine entirely.
       */
      case NonExistentReplica =>
        validReplicas.foreach { replica =>
          val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(replica.topicPartition)
          // Purge the replica from both the controller context and the state machine
          controllerContext.updatePartitionReplicaAssignment(replica.topicPartition, currentAssignedReplicas.filterNot(_ == replica.replica))
          logSuccessfulTransition(replicaId, replica.topicPartition, replicaState(replica), NonExistentReplica)
          replicaState.remove(replica)
        }
    }
  }

  /**
   * Repeatedly attempt to remove a replica from the isr of multiple partitions until there are no more remaining partitions
   * to retry.
   * @param replicaId The replica being removed from isr of multiple partitions
   * @param partitions The partitions from which we're trying to remove the replica from isr
   * @return The updated LeaderIsrAndControllerEpochs of all partitions for which we successfully removed the replica from isr.
   */
  private def removeReplicasFromIsr(replicaId: Int, partitions: Seq[TopicPartition]):
  Map[TopicPartition, LeaderIsrAndControllerEpoch] = {
    var results = Map.empty[TopicPartition, LeaderIsrAndControllerEpoch]
    var remaining = partitions
    // Keep retrying partitions that hit a ZooKeeper BADVERSION conflict; permanent failures are logged and dropped
    while (remaining.nonEmpty) {
      val (successfulRemovals, removalsToRetry, failedRemovals) = doRemoveReplicasFromIsr(replicaId, remaining)
      results ++= successfulRemovals
      remaining = removalsToRetry
      failedRemovals.foreach { case (partition, e) =>
        val replica = PartitionAndReplica(partition, replicaId)
        logFailedStateChange(replica, replicaState(replica), OfflineReplica, e)
      }
    }
    results
  }

  /**
   * Try to remove a replica from the isr of multiple partitions.
   * Removing a replica from isr updates partition state in zookeeper.
   *
   * @param replicaId The replica being removed from isr of multiple partitions
   * @param partitions The partitions from which we're trying to remove the replica from isr
   * @return A tuple of three values:
   *         1. The updated LeaderIsrAndControllerEpochs of all partitions for which we successfully removed the replica from isr.
   *         2. The partitions that we should retry due to a zookeeper BADVERSION conflict. Version conflicts can occur if
   *         the partition leader updated partition state while the controller attempted to update partition state.
   *         3. Exceptions corresponding to failed removals that should not be retried.
   */
  private def doRemoveReplicasFromIsr(replicaId: Int, partitions: Seq[TopicPartition]):
  (Map[TopicPartition, LeaderIsrAndControllerEpoch],
    Seq[TopicPartition],
    Map[TopicPartition, Exception]) = {
    val (leaderAndIsrs, partitionsWithNoLeaderAndIsrInZk, failedStateReads) = getTopicPartitionStatesFromZk(partitions)
    // Partitions where the replica is still in the ISR need an actual ZooKeeper update; the rest are already done
    val (leaderAndIsrsWithReplica, leaderAndIsrsWithoutReplica) = leaderAndIsrs.partition { case (_, leaderAndIsr) => leaderAndIsr.isr.contains(replicaId) }
    // NOTE(review): mapValues returns a lazy view in Scala 2.x, so these adjusted values may be
    // recomputed on each access — confirm this is intended (the computation is pure, so results agree).
    val adjustedLeaderAndIsrs = leaderAndIsrsWithReplica.mapValues { leaderAndIsr =>
      // If the removed replica was the leader, the partition is left leaderless;
      // never shrink the ISR to empty — keep the last member
      val newLeader = if (replicaId == leaderAndIsr.leader) LeaderAndIsr.NoLeader else leaderAndIsr.leader
      val adjustedIsr = if (leaderAndIsr.isr.size == 1) leaderAndIsr.isr else leaderAndIsr.isr.filter(_ != replicaId)
      leaderAndIsr.newLeaderAndIsr(newLeader, adjustedIsr)
    }
    val UpdateLeaderAndIsrResult(successfulUpdates, updatesToRetry, failedUpdates) = zkClient.updateLeaderAndIsr(
      adjustedLeaderAndIsrs, controllerContext.epoch)
    // A missing leader-and-isr path is only an error when the topic is not being deleted
    val exceptionsForPartitionsWithNoLeaderAndIsrInZk = partitionsWithNoLeaderAndIsrInZk.flatMap { partition =>
      if (!topicDeletionManager.isPartitionToBeDeleted(partition)) {
        val exception = new StateChangeFailedException(s"Failed to change state of replica $replicaId for partition $partition since the leader and isr path in zookeeper is empty")
        Option(partition -> exception)
      } else None
    }.toMap
    // Refresh the controller context's cached leadership info with the updated values
    val leaderIsrAndControllerEpochs = (leaderAndIsrsWithoutReplica ++ successfulUpdates).map { case (partition, leaderAndIsr) =>
      val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerContext.epoch)
      controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
      partition -> leaderIsrAndControllerEpoch
    }
    (leaderIsrAndControllerEpochs, updatesToRetry, failedStateReads ++ exceptionsForPartitionsWithNoLeaderAndIsrInZk ++ failedUpdates)
  }

  /**
   * Gets the partition state from zookeeper
   * @param partitions the partitions whose state we want from zookeeper
   * @return A tuple of three values:
   *         1. The LeaderAndIsrs of partitions whose state we successfully read from zookeeper
   *         2. The partitions that had no leader and isr state in zookeeper. This happens if the controller
   *         didn't finish partition initialization.
   *         3. Exceptions corresponding to failed zookeeper lookups or states whose controller epoch exceeds our current epoch.
   */
  private def getTopicPartitionStatesFromZk(partitions: Seq[TopicPartition]):
  (Map[TopicPartition, LeaderAndIsr],
    Seq[TopicPartition],
    Map[TopicPartition, Exception]) = {
    val leaderAndIsrs = mutable.Map.empty[TopicPartition, LeaderAndIsr]
    val partitionsWithNoLeaderAndIsrInZk = mutable.Buffer.empty[TopicPartition]
    val failed = mutable.Map.empty[TopicPartition, Exception]
    val getDataResponses = try {
      zkClient.getTopicPartitionStatesRaw(partitions)
    } catch {
      case e: Exception =>
        // The whole batch read failed: report the same exception for every partition
        partitions.foreach(partition => failed.put(partition, e))
        return (leaderAndIsrs.toMap, partitionsWithNoLeaderAndIsrInZk, failed.toMap)
    }
    getDataResponses.foreach { getDataResponse =>
      // The request context carries the TopicPartition the response belongs to
      val partition = getDataResponse.ctx.get.asInstanceOf[TopicPartition]
      if (getDataResponse.resultCode == Code.OK) {
        val leaderIsrAndControllerEpochOpt = TopicPartitionStateZNode.decode(getDataResponse.data, getDataResponse.stat)
        if (leaderIsrAndControllerEpochOpt.isEmpty) {
          partitionsWithNoLeaderAndIsrInZk += partition
        } else {
          val leaderIsrAndControllerEpoch = leaderIsrAndControllerEpochOpt.get
          // A newer controller epoch in ZooKeeper means this controller has been superseded
          if (leaderIsrAndControllerEpoch.controllerEpoch > controllerContext.epoch) {
            // NOTE(review): string concatenation below is missing a space between "probably" and "means".
            val exception = new StateChangeFailedException("Leader and isr path written by another controller. This probably" +
              s"means the current controller with epoch ${controllerContext.epoch} went through a soft failure and another " +
              s"controller was elected with epoch ${leaderIsrAndControllerEpoch.controllerEpoch}. Aborting state change by this controller")
            failed.put(partition, exception)
          } else {
            leaderAndIsrs.put(partition, leaderIsrAndControllerEpoch.leaderAndIsr)
          }
        }
      } else if (getDataResponse.resultCode == Code.NONODE) {
        partitionsWithNoLeaderAndIsrInZk += partition
      } else {
        failed.put(partition, getDataResponse.resultException.get)
      }
    }
    (leaderAndIsrs.toMap, partitionsWithNoLeaderAndIsrInZk, failed.toMap)
  }

  /** Returns true if at least one replica of the topic is currently in ReplicaDeletionStarted state. */
  def isAtLeastOneReplicaInDeletionStartedState(topic: String): Boolean = {
    controllerContext.replicasForTopic(topic).exists(replica => replicaState(replica) == ReplicaDeletionStarted)
  }

  /** Returns all replicas of the topic that are currently in the given state. */
  def replicasInState(topic: String, state: ReplicaState): Set[PartitionAndReplica] = {
    replicaState.filter { case (replica, s) => replica.topic.equals(topic) && s == state }.keySet.toSet
  }

  /** Returns true if every replica of the topic is in ReplicaDeletionSuccessful state. */
  def areAllReplicasForTopicDeleted(topic: String): Boolean = {
    controllerContext.replicasForTopic(topic).forall(replica => replicaState(replica) == ReplicaDeletionSuccessful)
  }

  /** Returns true if any replica of the topic is in the given state. */
  def isAnyReplicaInState(topic: String, state: ReplicaState): Boolean = {
    replicaState.exists { case (replica, s) => replica.topic.equals(topic) && s == state}
  }

  /** A transition is valid iff the replica's current state is a declared previous state of the target. */
  private def isValidTransition(replica: PartitionAndReplica, targetState: ReplicaState) =
    targetState.validPreviousStates.contains(replicaState(replica))

  /** Logs a successful state transition at trace level, tagged with the current controller epoch. */
  private def logSuccessfulTransition(replicaId: Int, partition: TopicPartition, currState: ReplicaState, targetState: ReplicaState): Unit = {
    stateChangeLogger.withControllerEpoch(controllerContext.epoch)
      .trace(s"Changed state of replica $replicaId for partition $partition from $currState to $targetState")
  }

  /** Logs an attempted transition from a state that is not a valid previous state of the target. */
  private def logInvalidTransition(replica: PartitionAndReplica, targetState: ReplicaState): Unit = {
    val currState = replicaState(replica)
    val e = new IllegalStateException(s"Replica $replica should be in the ${targetState.validPreviousStates.mkString(",")} " +
      s"states before moving to $targetState state. Instead it is in $currState state")
    logFailedStateChange(replica, currState, targetState, e)
  }

  /** Logs a failed state change at error level, tagged with the current controller epoch. */
  private def logFailedStateChange(replica: PartitionAndReplica, currState: ReplicaState, targetState: ReplicaState, t: Throwable): Unit = {
    stateChangeLogger.withControllerEpoch(controllerContext.epoch)
      .error(s"Controller $controllerId epoch ${controllerContext.epoch} initiated state change of replica ${replica.replica} " +
        s"for partition ${replica.topicPartition} from $currState to $targetState failed", t)
  }
}

/**
 * Base trait of the sealed ADT describing the states a replica can be in. Each state carries a
 * numeric identifier and declares the set of states from which a transition into it is legal.
 */
sealed trait ReplicaState {
  // Numeric identifier of this state
  def state: Byte
  // States from which a transition into this state is valid
  def validPreviousStates: Set[ReplicaState]
}

// A replica newly created during partition reassignment; may only act as a follower.
case object NewReplica extends ReplicaState {
  val state: Byte = 1
  val validPreviousStates: Set[ReplicaState] = Set(NonExistentReplica)
}

// Normal working state: the replica is assigned and running, as leader or follower.
case object OnlineReplica extends ReplicaState {
  val state: Byte = 2
  val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible)
}

// The broker hosting the replica is down.
case object OfflineReplica extends ReplicaState {
  val state: Byte = 3
  val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible)
}

// Deletion of the replica has been initiated (StopReplica with delete sent).
case object ReplicaDeletionStarted extends ReplicaState {
  val state: Byte = 4
  val validPreviousStates: Set[ReplicaState] = Set(OfflineReplica)
}

// The replica's data was deleted from the broker without error.
case object ReplicaDeletionSuccessful extends ReplicaState {
  val state: Byte = 5
  val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionStarted)
}

// Deletion of the replica failed.
case object ReplicaDeletionIneligible extends ReplicaState {
  val state: Byte = 6
  val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionStarted)
}

// The replica has been fully deleted and removed from the controller's bookkeeping.
case object NonExistentReplica extends ReplicaState {
  val state: Byte = 7
  val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionSuccessful)
}
