package com.yujiahao.bigdata.streaming

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.util.UUID

object Stream_Source_Diy {
  def main(args: Array[String]): Unit = {
    // Streaming environment: local mode using all cores, 3-second micro-batch interval.
    // StreamingContext takes the Spark configuration and the batch duration.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("WordCount")
    val streamingContext = new StreamingContext(sparkConf, Seconds(3))

    // Wire in the custom receiver; print() registers the required output operation.
    val receiverStream: ReceiverInputDStream[String] =
      streamingContext.receiverStream(new My_Receiver())
    receiverStream.print()

    // Start the receivers, then block the driver until the context is terminated;
    // without awaitTermination() the driver would exit immediately.
    streamingContext.start()
    streamingContext.awaitTermination()
  }
  //TODO Custom data receiver: 1) extend Receiver; 2) the type parameter [T] is the
  //     type of the collected records (the constructor takes the storage level).

  /**
   * Custom data source that emits a random UUID string roughly once per second.
   *
   * Spark Receiver contract: `onStart()` must be non-blocking and return
   * immediately, so the generation loop runs on a dedicated daemon thread.
   * `onStop()` flips a volatile flag that the worker thread observes.
   */
  class My_Receiver extends Receiver[String](StorageLevel.MEMORY_ONLY_SER) {
    // Written by onStop() on the stop path and read by the worker thread,
    // so it must be @volatile to guarantee cross-thread visibility.
    @volatile private var flag = true

    override def onStart(): Unit = {
      // Re-arm the flag so the receiver can be restarted after a stop.
      flag = true
      // BUG FIX: the original ran the while-loop directly inside onStart(),
      // blocking the receiver supervisor thread forever. onStart() must
      // return quickly; the loop belongs on its own thread.
      val worker = new Thread("my-receiver-source") {
        override def run(): Unit = {
          while (flag && !isStopped()) {
            // store() hands one record to Spark for the current micro-batch.
            store(UUID.randomUUID().toString)
            Thread.sleep(1000)
          }
        }
      }
      worker.setDaemon(true)
      worker.start()
    }

    // Called when the receiver is being shut down: signal the worker to exit.
    override def onStop(): Unit = {
      flag = false
    }
  }
}
