package com.study.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext, StreamingContextState}

/**
 * Demonstrates gracefully stopping a Spark Streaming application.
 *
 * A windowed word count is run over a socket source; a separate thread then
 * performs a graceful shutdown: the receivers stop accepting new data, the
 * batches already received are processed to completion, and only then does
 * the context (and the underlying SparkContext) stop.
 */
object SparkStreaming08_Close {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming")
    val ssc = new StreamingContext(sparkConf, Seconds(3))
    // Checkpointing is mandatory when reduceByKeyAndWindow is used with an
    // inverse reduce function.
    ssc.checkpoint("cp")

    val lines = ssc.socketTextStream("localhost", 9999)
    val wordToOne = lines.map((_, 1))

    // Windowed count: window = 9s, slide = 3s. The inverse function (x - y)
    // lets Spark subtract the batch leaving the window instead of recomputing
    // the whole window from scratch.
    val wordToCount = wordToOne.reduceByKeyAndWindow(
      (x: Int, y: Int) => x + y,
      (x: Int, y: Int) => x - y,
      Seconds(9), Seconds(3)
    )
    wordToCount.print()
    ssc.start()

    // Stopping the collector must happen on a new thread — the main thread is
    // blocked in awaitTermination(). In a real job this thread would poll a
    // shutdown flag kept in an external system (e.g. HDFS, ZooKeeper, Redis)
    // in a loop, instead of sleeping a fixed 5 seconds as this demo does.
    new Thread(new Runnable {
      override def run(): Unit = {
        Thread.sleep(5000)
        // Graceful shutdown: only attempt to stop while the context is still
        // ACTIVE (stop on a non-active context would fail or be a no-op).
        val state = ssc.getState()
        if (state == StreamingContextState.ACTIVE) {
          // Named args make the two boolean flags self-documenting.
          ssc.stop(stopSparkContext = true, stopGracefully = true)
        }
        System.exit(0)
      }
    }, "streaming-closer").start()

    ssc.awaitTermination()
  }
}
