package com.atguigu.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Duration, StreamingContext, StreamingContextState}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

// NOTE(review): "Sprak" is a typo for "Spark", but the name is kept because it is
// this object's public identifier (and likely matches the file name).
object SprakStreaming11_Close {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("StreamWordCount")

    // A Spark Streaming job may need to be shut down for a business upgrade or a
    // technology change. stop() cannot be called from the main thread (it blocks
    // in awaitTermination below), so a separate thread performs the shutdown.
    val ssc = new StreamingContext(conf, Duration(3000)) // 3-second batch interval

    val socketData: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)

    // Classic word count over each batch.
    val words: DStream[String] = socketData.flatMap(_.split(" "))
    val wordToOne: DStream[(String, Int)] = words.map((_, 1))
    val wordToCount: DStream[(String, Int)] = wordToOne.reduceByKey(_ + _)
    wordToCount.print()

    ssc.start()

    // TODO: launch a monitor thread to stop the Spark Streaming receiver.
    // In production the shutdown trigger would be an external flag (e.g. a status
    // row flipping 0 -> 1 in a MySQL table, a marker file, or a ZooKeeper node)
    // polled in a loop; the fixed sleep here only simulates that signal.
    new Thread(
      new Runnable {
        override def run(): Unit = {
          Thread.sleep(3000)
          // Only stop a context that is still running; stopping twice would fail.
          val state: StreamingContextState = ssc.getState()
          if (state == StreamingContextState.ACTIVE) {
            // Graceful shutdown: the driver stops sending new data to the
            // executors, and the executors finish the batches already received
            // before shutting down.
            ssc.stop(stopSparkContext = true, stopGracefully = true)
          }
          // This monitor thread terminates by simply returning from run();
          // never use Thread.stop() to kill a thread.
        }
      }
    ).start()

    // Block the main thread until the context is stopped (by the monitor thread
    // above or by an error).
    ssc.awaitTermination()
  }
}
