package org.huangrui.spark.scala.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext, StreamingContextState}

/**
 * @Author hr
 * @Create 2024-10-22 3:40 
 */
object SparkStreaming08_Close {
  def main(args: Array[String]): Unit = {

    /*
       For comparison, stopping a plain thread:
       val thread = new Thread()
       thread.start()

       thread.stop(); // forceful shutdown
     */

    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))

    // Read lines from a socket and map every word to a (word, 1) pair.
    val lines: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)
    val wordToOne: DStream[(String, Int)] = lines.flatMap(_.split(" ")).map((_, 1))

    wordToOne.print()

    ssc.start()

    // Stopping the receiver must happen on a separate thread (the main thread
    // is about to block in awaitTermination). In a real deployment the stop
    // signal would come from an external store polled in a loop, e.g.:
    //   MySQL : Table(stopSpark) => Row => data
    //   Redis : Data (K-V)
    //   ZK    : /stopSpark
    //   HDFS  : /stopSpark
    /*
    while ( true ) {
        if (externalStopFlagIsSet) {
            // Inspect the current streaming state before stopping.
            val state: StreamingContextState = ssc.getState()
            if ( state == StreamingContextState.ACTIVE ) {
                ssc.stop(true, true)
            }
        }
        Thread.sleep(5000)
    }
     */

    val stopper = new Thread(new Runnable {
      override def run(): Unit = {
        // Graceful shutdown: stop accepting new data, finish processing the
        // data already received, then tear everything down.
        Thread.sleep(5000)
        if (ssc.getState() == StreamingContextState.ACTIVE) {
          ssc.stop(stopSparkContext = true, stopGracefully = true)
        }
        System.exit(0)
      }
    })
    stopper.start()

    ssc.awaitTermination() // blocks the main thread until the context stops
  }
}
