 1.Spark 原理之Master & Worker解析下Master 启动流程
   
   Master是RpcEndpoint，实现了 RpcEndpoint 接口
   Master的生命周期遵循 constructor -> onStart -> receive* -> onStop 的步骤
   Master 的 onStart 方法中最重要的事情是：执行恢复
   Master HA的实现方式：
       ZOOKEEPER。基于zookeeper的Active / Standby 模式。适用于生产模式，其基本原理是通过zookeeper来选
举一个Master，其他的Master处于Standby状态；
       FILESYSTEM。基于文件系统的单点恢复。主要用于开发或测试环境。为Spark提供目录保存spark Application
和worker的注册信息，一旦Master发生故障，可通过重新启动Master进程(start-master.sh)，恢复已运行的
Spark Application和Worker 注册信息；
       CUSTOM。允许用户自定义 HA 的实现，对于高级用户特别有用；
        NONE。默认情况，未配置HA，不会持久化集群的数据，Master启动后立即接管集群的管理；
 
 2.Worker 启动流程
   
   小结：
         Master、Worker都是RpcEndpoint，实现了 RpcEndpoint 接口。主要任务是收发、处理消息；
        Master、Worker的生命周期都遵循:constructor -> onStart -> receive* -> onStop
        在Master的onStart方法中最重要的事情是：执行恢复
        在Worker的onStart方法中最重要的事情是：向master注册
 
 3.模拟程序
package org.apache.spark.deploy

import org.apache.spark.SparkConf
import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef, 
  RpcEnv, ThreadSafeRpcEndpoint}
import org.apache.spark.SecurityManager

class MyMaster(override val rpcEnv: RpcEnv,
               address: RpcAddress,
               val securityMgr: SecurityManager,
               val conf: SparkConf)
  extends ThreadSafeRpcEndpoint {
  // Lifecycle follows the RpcEndpoint contract: constructor -> onStart -> receive* -> onStop.
  println("MyMaster 主构造器运行")

  /** Invoked once after this endpoint is registered with the RpcEnv. */
  override def onStart(): Unit = {
    println("My Master is starting ... ....")
  }

  /** Message loop: handles the RPC probe, worker registration and heartbeats. */
  override def receive: PartialFunction[Any, Unit] = {
    // Simple arithmetic probe used to verify the RPC channel is alive.
    case TestAdd(x, y) =>
      println(s"x=$x, y=$y; x+y=${x + y}")

    // Worker registration request: log the details, then acknowledge
    // back to the worker so it can start heartbeating.
    case RegisterWorker(id, workerHost, workerPort, workerRef, cores, memory, masterAddress) =>
      val details = s"id = $id, workerHost =$workerHost, workerPort = $workerPort, " +
        s"workerRef = $workerRef, cores = $cores, memory = $memory, masterAddress = $masterAddress"
      println(details)
      workerRef.send(RegisteredWorker(self, masterAddress))

    // Periodic liveness report from a registered worker.
    case Heartbeat(workerId, worker) =>
      println(s"workerId = $workerId, worker = $worker, time = ${System.currentTimeMillis()}")

    case _ => println("MyMaster 接收到未知消息")
  }
}

object MyMaster {
  val SYSTEM_NAME = "MyMaster"
  val ENDPOINT_NAME = "Master"

  /**
   * Entry point: boots an RpcEnv on localhost:10001, registers the
   * [[MyMaster]] endpoint, sends it a self-test message, then blocks
   * until the RpcEnv terminates.
   *
   * Fix: procedure syntax (`def main(...) { ... }`) is deprecated;
   * declare the result type explicitly with `: Unit =`.
   */
  def main(argStrings: Array[String]): Unit = {
    val host = "localhost"
    val port = 10001
    val conf = new SparkConf

    val securityMgr = new SecurityManager(conf)
    val rpcEnv = RpcEnv.create(SYSTEM_NAME, host, port, conf, securityMgr)
    // setupEndpoint triggers constructor -> onStart on a dispatcher thread.
    val masterEndpoint = rpcEnv.setupEndpoint(ENDPOINT_NAME,
      new MyMaster(rpcEnv, rpcEnv.address, securityMgr, conf))
    // Fire-and-forget self test: MyMaster should log "x=10, y=20; x+y=30".
    masterEndpoint.send(TestAdd(10, 20))

    // Keep the main thread alive; the RpcEnv keeps serving messages.
    rpcEnv.awaitTermination()
  }
}

// RPC protocol messages exchanged between MyMaster and MyWorker.

// Connectivity self-test: receiver prints x, y and their sum.
case class TestAdd(x: Int, y: Int)
// Worker -> Master: request to join the cluster with the worker's resources.
case class RegisterWorker(
                           id: String,
                           host: String,
                           port: Int,
                           worker: RpcEndpointRef,
                           cores: Int,
                           memory: Int,
                           masterAddress: RpcAddress)
// Master -> Worker: acknowledgement of a successful registration.
case class RegisteredWorker(
                             master: RpcEndpointRef,
                             masterAddress: RpcAddress)
// Worker -> itself: scheduler tick telling it to send a heartbeat now.
case object SendHeartbeat
// Worker -> Master: periodic liveness report.
case class Heartbeat(workerId: String, worker: RpcEndpointRef)


package org.apache.spark.deploy

import java.text.SimpleDateFormat
import java.util.concurrent.TimeUnit
import java.util.{Date, Locale}
import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef, 
  RpcEnv, ThreadSafeRpcEndpoint}
import org.apache.spark.util.{ThreadUtils, Utils}
import org.apache.spark.{SecurityManager, SparkConf}

class MyWorker(override val rpcEnv: RpcEnv,
               cores: Int,
               memory: Int,
               masterRpcAddresses: Array[RpcAddress],
               endpointName: String,
               val conf: SparkConf,
               val securityMgr: SecurityManager)
  extends ThreadSafeRpcEndpoint {
  println("MyWorker 主构造器运行")

  // This worker's identity: host/port come from the RpcEnv it runs in.
  private val host = rpcEnv.address.host
  private val port = rpcEnv.address.port
  // SimpleDateFormat is not thread-safe, so build a fresh instance per use.
  private def createDateFormat = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US)

  // e.g. "worker-20240101120000-localhost-8888"
  private val workerId = generateWorkerId()
  private def generateWorkerId(): String = {
    "worker-%s-%s-%d".format(createDateFormat.format(new Date), host, port)
  }

  // Single daemon thread that drives the periodic heartbeat.
  // (Renamed from the original typo "forwordMessageScheduler" — private, so safe.)
  private val forwardMessageScheduler =
    ThreadUtils.newDaemonSingleThreadScheduledExecutor("worker-forward-message-scheduler")
  // Heartbeat interval: 15 seconds (60s / 4).
  private val HEARTBEAT_MILLIS = 60 * 1000 / 4

  // Set once the master acknowledges registration; None until then.
  private var master: Option[RpcEndpointRef] = None

  /** On start, register this worker with every known master address. */
  override def onStart(): Unit = {
    println("My Worker is onStart 向 MyMaster 注册... ....")
    // Fix: use foreach instead of map — we only send messages and the
    // mapped result was discarded.
    masterRpcAddresses.foreach { masterAddress =>
      val masterEndpoint = rpcEnv.setupEndpointRef(masterAddress, MyMaster.ENDPOINT_NAME)
      masterEndpoint.send(TestAdd(100, 200))
      masterEndpoint.send(RegisterWorker(
        workerId,
        host,
        port,
        self,
        cores,
        memory,
        masterEndpoint.address))
    }
  }

  override def receive: PartialFunction[Any, Unit] = {
    case TestAdd(x, y) => println(s"x=$x, y=$y; x+y=${x+y}")

    // Master acknowledged our registration: remember it and start
    // the fixed-rate heartbeat (first tick immediately, then every interval).
    case RegisteredWorker(master, masterAddress) =>
      println(s"master = $master, masterAddress = $masterAddress")
      this.master = Some(master)
      forwardMessageScheduler.scheduleAtFixedRate(new Runnable {
        override def run(): Unit = Utils.tryLogNonFatalError {
          self.send(SendHeartbeat)
        }
      }, 0, HEARTBEAT_MILLIS, TimeUnit.MILLISECONDS)

    // Self-message from the scheduler thread. Fix: replace master.get
    // (throws NoSuchElementException if no master has replied yet) with
    // Option.foreach — heartbeat only when actually registered.
    case SendHeartbeat =>
      master.foreach(_.send(Heartbeat(workerId, self)))

    case _ => println("MyWorker 接收到未知消息")
  }
}

object MyWorker {
  val SYSTEM_NAME = "MyWorker"
  val ENDPOINT_NAME = "Worker"

  /**
   * Entry point: boots an RpcEnv on localhost:8888, registers the
   * [[MyWorker]] endpoint (which registers itself with the master from
   * its onStart), then blocks until the RpcEnv terminates.
   *
   * Fix: procedure syntax (`def main(...) { ... }`) is deprecated;
   * declare the result type explicitly with `: Unit =`.
   */
  def main(argStrings: Array[String]): Unit = {
    val host = "localhost"
    val port = 8888
    val cores = 8
    val memory = 1000000
    // Must match the host:port that MyMaster.main listens on.
    val masters: Array[String] = Array("spark://localhost:10001")

    val conf = new SparkConf

    val systemName: String = SYSTEM_NAME
    val securityMgr = new SecurityManager(conf)
    val rpcEnv = RpcEnv.create(systemName, host, port, conf, securityMgr)
    // Parse "spark://host:port" URLs into RpcAddress values.
    val masterAddresses: Array[RpcAddress] = masters.map(RpcAddress.fromSparkURL(_))
    rpcEnv.setupEndpoint(ENDPOINT_NAME, new MyWorker(rpcEnv, cores, memory,
      masterAddresses, ENDPOINT_NAME, conf, securityMgr))

    rpcEnv.awaitTermination()
  }
}


  
   