package org.niit.streaming

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.util.Random

/*
 Spark Streaming ingests data in real time from sources such as Kafka, Flume,
 sockets and HDFS. Sources like MySQL provide no built-in way to obtain data in
 real time, so for such cases a custom receiver stream must be implemented.
 */
object SparkStreaming_03 {
  def main(args: Array[String]): Unit = {
    // Run locally using all available cores; micro-batch interval of 3 seconds.
    val conf = new SparkConf().setMaster("local[*]").setAppName("spark")
    val streamingContext = new StreamingContext(conf, Seconds(3))
    // Keep the console readable by suppressing everything below ERROR.
    streamingContext.sparkContext.setLogLevel("ERROR")

    // Attach the custom receiver and print each batch's records to stdout.
    val lines: ReceiverInputDStream[String] = streamingContext.receiverStream(new MyReceiver)
    lines.print()

    // Start the streaming job and block until it is terminated externally.
    streamingContext.start()
    streamingContext.awaitTermination()
  }
  /*
    Custom data collector (receiver).
   */
/**
 * Custom data receiver that feeds randomly generated strings into Spark Streaming.
 *
 * Records are buffered with StorageLevel.MEMORY_ONLY. A background thread generates
 * one record per iteration and hands it to Spark via store().
 */
class MyReceiver extends Receiver[String](StorageLevel.MEMORY_ONLY) {

    // @volatile: the flag is written by onStop() (Spark's thread) and read by the
    // collector thread started in onStart(); without it the stop signal may never
    // become visible and the loop could spin forever.
    @volatile private var flag = true

    /**
     * Starts the data-collection thread. Must be non-blocking, so the work
     * happens on a separate daemon thread.
     */
  override def onStart(): Unit = {
    // Reset the flag so the receiver also works after Spark restarts it
    // (a previous onStop() would otherwise leave flag == false).
    flag = true
    val collector = new Thread(new Runnable {
      override def run(): Unit = {
        // Reuse a single RNG instead of allocating one per iteration.
        val random = new Random()
        while (flag) {
          val message = "采集的数据为：" + random.nextInt(10).toString
          // Hand the record to Spark; do not use `return` here.
          store(message)
          // Throttle the loop so the receiver does not busy-spin and flood
          // each micro-batch with millions of records.
          Thread.sleep(100)
        }
      }
    })
    // Daemon thread: never keeps the JVM alive after the streaming context stops.
    collector.setDaemon(true)
    collector.start()
  }

  /** Signals the collector thread to exit its loop. Must be non-blocking. */
  override def onStop(): Unit = {
    flag = false
  }
}


}
