package com.catmiao.spark.stream

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext, StreamingContextState}

import java.lang.Thread

/**
 * @title: SparkStreaming10_Close
 * @projectName spark_study
 * @description: Windowed word count that demonstrates graceful shutdown of a
 *               StreamingContext from a monitor thread.
 * @author ChengMiao
 * @date 2024/3/25 00:31
 */
object SparkStreaming10_Close {

  /**
   * Entry point: reads lines from a local socket, counts words over a
   * non-overlapping 6-second window, and demonstrates a graceful shutdown
   * driven by a separate monitor thread.
   */
  def main(args: Array[String]): Unit = {

    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming")
    // Batch (collection) interval: 3 seconds.
    val ssc = new StreamingContext(sparkConf, Seconds(3))

    val line: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)

    val wordToOne: DStream[(String, Int)] = line.map((_, 1))

    // The window length must be a multiple of the batch interval.
    // By default the window slides by one batch interval; setting the slide
    // equal to the window length (6s, 6s) avoids recomputing overlapping data.
    val windowDS: DStream[(String, Int)] = wordToOne.window(Seconds(6), Seconds(6))

    val value: DStream[(String, Int)] = windowDS.reduceByKey(_ + _)

    // BUG FIX: at least one output operation must be registered, otherwise
    // ssc.start() throws "requires at least one output operation".
    value.print()

    ssc.start()

    /**
     * BUG FIX: awaitTermination() blocks the current (main) thread, so the
     * shutdown-monitor thread must be started BEFORE calling it. In the
     * original code the thread was created after awaitTermination() and
     * therefore never ran.
     *
     * In a real deployment the stop flag would be read from external storage
     * (MySQL, Redis, ZooKeeper, HDFS, ...) on each poll; here it is
     * hard-coded to true so the job stops after the first poll interval.
     */
    new Thread(new Runnable {
      override def run(): Unit = {
        while (true) {
          // Poll every 5 seconds before checking the stop flag, so the job
          // gets a chance to process at least one window.
          Thread.sleep(5000)
          val shouldStop = true // TODO: replace with a lookup in MySQL/Redis/ZK/HDFS
          if (shouldStop) {
            // Only stop a context that is actually running.
            val state: StreamingContextState = ssc.getState()
            if (state == StreamingContextState.ACTIVE) {
              // stopSparkContext = true: also tear down the SparkContext.
              // stopGracefully  = true: receivers stop accepting new data and
              // all already-received data is processed before shutdown.
              ssc.stop(stopSparkContext = true, stopGracefully = true)
            }
            System.exit(0)
          }
        }
      }
    }, "streaming-close-monitor").start()

    ssc.awaitTermination()
  }

}
