package org.apache.spark.netty

import java.util.concurrent.TimeUnit
import org.apache.spark.{SparkConf, SparkContext, SparkEnv}
import org.apache.spark.common.Message.{CheckTimeOut, ClientInfo, Heartbeat, NettyClientInfo, NotifyOffline, NotifyOnline, RegisterNettyClient, RegisteredNettyClient}
import org.apache.spark.rpc.{RpcAddress, RpcCallContext, RpcEndpoint, RpcEndpointRef, RpcEnv}
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.{ThreadUtils, Utils}
import org.apache.spark.common.Constant

import scala.collection.mutable

/**
 * @author: chenzhidiao
 * @date: 2021/1/7 7:37
 * @description:
 * @version: 1.0
 */
/**
 * Server-side RPC endpoint: accepts client registrations, tracks client
 * heartbeats, and notifies all live clients when a peer goes online/offline.
 *
 * All state is only touched from the endpoint's single-threaded message loop
 * (`receive`), so the mutable collections need no extra synchronization.
 */
class MyNettyRpcServer(override val rpcEnv: RpcEnv) extends RpcEndpoint {

  // Interval between heartbeat-timeout checks, in milliseconds.
  private val checkIntervalMs = 3000L
  // A client is considered dead if no heartbeat arrived within this window (ms).
  private val heartbeatTimeoutMs = 10000L

  // Registered client info, keyed by client id.
  val clientsInfo = new mutable.HashMap[String, NettyClientInfo]()
  // De-duplicated set of all known clients.
  val clients = new mutable.HashSet[NettyClientInfo]()
  // RpcEndpointRef proxy for every registered client, keyed by client id.
  val clientsEndpointRef = new mutable.HashMap[String, RpcEndpointRef]()

  // Handle to the heartbeat-check scheduler so it can be stopped in onStop().
  // (The original never kept a reference, leaking the daemon thread.)
  private var checkScheduler: Option[java.util.concurrent.ScheduledExecutorService] = None

  override def onStart(): Unit = {
    // Periodically send ourselves CheckTimeOut so timeout detection runs on the
    // endpoint's own message loop rather than on the scheduler thread.
    val scheduler =
      ThreadUtils.newDaemonSingleThreadScheduledExecutor("server-heartbeat-check-thread")
    scheduler.scheduleAtFixedRate(new Runnable {
      override def run(): Unit = Utils.tryLogNonFatalError {
        self.send(CheckTimeOut)
      }
    }, 0, checkIntervalMs, TimeUnit.MILLISECONDS)
    checkScheduler = Some(scheduler)
  }

  override def onStop(): Unit = {
    // Stop the periodic check when the endpoint shuts down.
    checkScheduler.foreach(_.shutdownNow())
    checkScheduler = None
  }

  // Fire-and-forget message handler (no reply expected).
  override def receive: PartialFunction[Any, Unit] = {

    case CheckTimeOut => {
      val now = System.currentTimeMillis()
      // Collect the timed-out clients first; never mutate `clients` while
      // notification logic is still scanning it.
      val deadClients = clients.filter(c => now - c.lastHeartBeatTime > heartbeatTimeoutMs)
      // Drop all bookkeeping for dead clients. NOTE: the original leaked the
      // clientsEndpointRef entry; it is removed here as well.
      deadClients.foreach { dead =>
        clients -= dead
        clientsInfo.remove(dead.clientid)
        clientsEndpointRef.remove(dead.clientid)
      }
      // Notify the remaining (live) clients about each offline peer.
      deadClients.foreach { dead =>
        clients.foreach { online =>
          // Option.foreach instead of .get: a missing ref is skipped, not a crash.
          clientsEndpointRef.get(online.clientid).foreach(_.send(NotifyOffline(dead.clientid)))
        }
        println(s"客户端 ${dead.clientid} 下线")
      }
      println("当前在线的客户端数为：" + clientsInfo.size + "\t分别是：" + clients.map(x => "\r\n\t" + x.toString)
        .mkString(","))
    }

    case Heartbeat(clientid) => {
      // Guard: the original used clientsInfo(clientid), which throws
      // NoSuchElementException for an unknown (e.g. already expired) client.
      clientsInfo.get(clientid) match {
        case Some(info) =>
          // Refresh the last-seen timestamp in place.
          info.lastHeartBeatTime = System.currentTimeMillis()
          clientsInfo(clientid) = info
          clients += info
        case None =>
          // Unknown client: ignore; it must register again before heartbeating.
      }
    }

    case RegisterNettyClient(clientid, hostname, port, clientEndpointName) => {
      // Build the server-side record for this client.
      val nettyClientInfo = new NettyClientInfo(clientid, hostname, port)

      println(s"客户端 ${clientid} 上线")

      // Re-registration: drop any stale object for the same id from `clients`
      // before inserting the fresh one (the original accumulated duplicates).
      clientsInfo.get(clientid).foreach(clients -= _)
      clientsInfo.put(clientid, nettyClientInfo)
      clients += nettyClientInfo

      // Proxy through which the server can push messages to this client.
      val clientEndpointRef = rpcEnv.setupEndpointRef(new RpcAddress(hostname, port), clientEndpointName)
      clientsEndpointRef.put(clientid, clientEndpointRef)

      // Acknowledge the registration.
      clientEndpointRef.send(RegisteredNettyClient("注册成功"))

      // Tell every other online client that a new peer is up.
      clients.foreach { online =>
        if (online.clientid != clientid) {
          clientsEndpointRef.get(online.clientid).foreach(_.send(NotifyOnline(clientid)))
        }
      }
    }
  }
}

object MyNettyRpcServer {
  /**
   * Entry point: boots a local Spark session (used only to obtain a SparkEnv,
   * and through it a SecurityManager), creates a listening Netty RpcEnv,
   * registers the server endpoint and blocks until the RpcEnv terminates.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
    val session: SparkSession = SparkSession.builder()
      .config(sparkConf)
      .appName("NettyRpcTest")
      .master("local[*]")
      .getOrCreate()
    val context: SparkContext = session.sparkContext
    val env: SparkEnv = context.env

    // Last flag false: presumably clientMode = false, i.e. this side listens
    // for incoming connections — TODO confirm against RpcEnv.create signature.
    val serverRpcEnv: RpcEnv = RpcEnv.create(
      Constant.NettyRpcEnvName,
      Constant.NettyServerHostName,
      Constant.NettyServerPort,
      sparkConf,
      env.securityManager,
      false)

    serverRpcEnv.setupEndpoint(Constant.NettyServerEndpointName, new MyNettyRpcServer(serverRpcEnv))

    // Block the main thread for the lifetime of the RPC environment.
    serverRpcEnv.awaitTermination()
  }

}
