package sparkStreaming

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.dstream.{DStream, InputDStream, ReceiverInputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.util.Random

object Demo01_streaming {

  /**
   * 获取 StreamingContext
   * @return
   */
  /**
   * Builds a local StreamingContext with a 5-second micro-batch interval.
   *
   * @return a new StreamingContext running on all local cores
   */
  def getStreamingContext(): StreamingContext = {
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("streaming")
    val streamingContext = new StreamingContext(sparkConf, Seconds(5))
    streamingContext
  }

  /**
   * 读取 socket 的数据
   * @param ssc
   */
  /**
   * Reads live text lines from a socket source and prints a per-batch
   * word count.
   *
   * @param ssc the streaming context the socket stream is attached to
   */
  def getSocketData(ssc:StreamingContext) = {

    // Receive streaming text from node101:44444.
    val lines: ReceiverInputDStream[String] = ssc.socketTextStream("node101", 44444)

    // Classic word count over each micro-batch: split, pair, sum, print.
    val words = lines.flatMap(line => line.split(" "))
    val pairs = words.map(word => (word, 1))
    val counts = pairs.reduceByKey((a, b) => a + b)
    counts.print()
  }

  /**
   * 读取 hdfs 的数据
   * @param ssc
   */
  /**
   * Monitors an HDFS directory for newly created files and prints a
   * per-batch word count of their contents.
   *
   * @param ssc the streaming context the file stream is attached to
   */
  def getHDFSData(ssc:StreamingContext) = {

    // Watch the HDFS directory for new text files (note: original comment
    // incorrectly said "socket"; this is a file-based source).
    val lines: DStream[String] = ssc.textFileStream("hdfs://node101:9820/spark")

    // Word count over each micro-batch: split, pair, sum, print.
    val words = lines.flatMap(line => line.split(" "))
    val pairs = words.map(word => (word, 1))
    val counts = pairs.reduceByKey((a, b) => a + b)
    counts.print()
  }

  /**
   * 读取kafka的数据
   * @param ssc
   */
  /**
   * Consumes the "supermarket" Kafka topic via the direct (receiver-less)
   * API and prints each record's value.
   *
   * @param ssc the streaming context the Kafka stream is attached to
   */
  def getKafkaData(ssc:StreamingContext) = {

    // Both key and value arrive as plain strings.
    val deserializer = "org.apache.kafka.common.serialization.StringDeserializer"

    // Consumer configuration: brokers, consumer group, and deserializers.
    val kafkaParams = Map[String,Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "node101:9092,node102:9092,node103:9092,node104:9092",
      ConsumerConfig.GROUP_ID_CONFIG -> "supermarket",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> deserializer,
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> deserializer
    )

    // Direct stream: executors pull from Kafka partitions themselves,
    // PreferConsistent spreads partitions evenly across executors.
    val records: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](Set("supermarket"), kafkaParams)
      )

    // Only the record payload is of interest here.
    records.map(_.value()).print()

  }

  /**
   * 读取自定义数据源
   *
   * @param ssc
   */
  /**
   * Demonstrates a custom receiver: a background thread emits a random
   * number roughly every 3 seconds until the receiver is stopped.
   *
   * Fix: `flag` is written by `onStop()` (called from Spark's receiver
   * supervisor thread) but read by the worker thread started in
   * `onStart()`. Without `@volatile` there is no happens-before edge, so
   * the worker loop might never observe the stop request. Marking the
   * flag volatile guarantees cross-thread visibility.
   *
   * @param ssc the streaming context the custom receiver is attached to
   */
  def getMyselfData(ssc: StreamingContext) = {
    val dstream = ssc.receiverStream(new Receiver[String](StorageLevel.MEMORY_ONLY) {

      // Volatile: set to false on another thread by onStop(); must be
      // visible to the generator thread below.
      @volatile var flag = true

      override def onStart(): Unit = {

        val rand = new Random()
        // onStart() must not block, so data generation runs on its own thread.
        new Thread(new Runnable {
          override def run(): Unit = {
            while(flag){
              val num = rand.nextInt()
              val message = s"data:${num}"
              // Hand the record to Spark for storage/replication.
              store(message)
              Thread.sleep(3000)
            }
          }
        }).start()
      }

      override def onStop(): Unit = {
        // Signal the generator thread to exit its loop.
        flag = false
      }
    })

    dstream.print()
  }


  /**
   * transformation算子 中的 transform
   * 对RDD级别进行处理 map是对数据级别进行处理
   * @param ssc
   */
  /**
   * Demonstrates `transform`: operates at the RDD level (once per batch),
   * whereas `map` operates at the record level. Here it drops any word
   * found on a blacklist.
   *
   * @param ssc the streaming context the socket stream is attached to
   */
  def testTransform(ssc:StreamingContext) = {
    // Words that must be filtered out of the stream.
    val blackList = List("宋小宝","大橙子","大鬼","小鬼")
    val socketLines = ssc.socketTextStream("node101",9999)
    // transform gives direct access to each batch's RDD.
    val filtered = socketLines.transform { rdd =>
      val words = rdd.flatMap(line => line.split(" "))
      words.filter(word => !blackList.contains(word))
    }
    filtered.print()
  }

  /**
   * 状态：在实时处理中的一个概念
   * int sum = 0
   * for(int i=1;i<=10;i++){
   *  sum=sum+i
   * }
   */
  /**
   * Demonstrates stateful word counting with `updateStateByKey`: the
   * running total per word is carried across batches, conceptually like
   * accumulating `sum += i` in a loop.
   *
   * Requires checkpointing because the state must survive batch
   * boundaries.
   *
   * Improvement: the update function replaces the non-idiomatic
   * `var` + `for` accumulation with `values.sum`, which is the same
   * computation expressed functionally.
   *
   * @param ssc the streaming context the socket stream is attached to
   */
  def testUpdateStateByKey(ssc:StreamingContext) = {
    // Checkpoint directory is mandatory for stateful transformations.
    ssc.checkpoint("D://resources/updateStateByKey")
    val lines = ssc.socketTextStream("node101",9999)
    val words = lines.flatMap(_.split(" "))
    val pairs = words.map((_,1))
    // values: this batch's counts for the key; state: total so far.
    val wordCounts = pairs.updateStateByKey((values: Seq[Int], state: Option[Int]) =>
      Some(state.getOrElse(0) + values.sum)
    )
    wordCounts.print()
  }



  /**
   * Entry point: builds the context, wires up exactly one of the demo
   * sources (the rest are left commented out), then starts the streaming
   * job and blocks until it terminates.
   */
  def main(args: Array[String]): Unit = {

    val ssc: StreamingContext = getStreamingContext()

    // Swap in any single demo below:
//    getSocketData(ssc)
//    getHDFSData(ssc)
//    getKafkaData(ssc)
//    getMyselfData(ssc)
//    testTransform(ssc)
    testUpdateStateByKey(ssc)

    // Start the computation and wait (blocks until the context is stopped).
    ssc.start()
    ssc.awaitTermination()

    // Tear down streaming only; leave the underlying SparkContext alive.
    ssc.stop(stopSparkContext = false)

  }

}
