package com.shujia.spark.stream

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream

object Demo4DStreamToRDD {

  def main(args: Array[String]): Unit = {
    // Build the Spark session: local mode with 2 threads, and a single
    // shuffle partition so small demo batches don't fan out needlessly.
    val spark: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("rdd")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val sc: SparkContext = spark.sparkContext

    // 5-second micro-batch streaming context, with checkpointing enabled.
    val ssc = new StreamingContext(sc, Durations.seconds(5))
    ssc.checkpoint("data/checkpoint")

    // Receive lines of text over a socket from host "master", port 8888.
    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    /**
     * foreachRDD: exposes each micro-batch of the DStream as a plain RDD.
     * The closure below runs once per batch interval, each time seeing
     * only that batch's data.
     */
    linesDS.foreachRDD(batchRDD => {
      println("执行foreachRDD")

      // Word count using the raw RDD API. NOTE: these are lazy
      // transformations and no action is applied (the foreach below is
      // commented out), so this chain is never actually executed — it is
      // kept here for demonstration purposes only.
      val wordCounts: RDD[(String, Int)] = batchRDD
        .flatMap(_.split(","))
        .map((_, 1))
        .reduceByKey(_ + _)
      //wordCounts.foreach(println)

      // The same word count via the DataFrame DSL: convert the batch RDD
      // to a single-column DataFrame (default column name "value"), split
      // and explode each line into words, then group and count.
      val batchDF: DataFrame = batchRDD.toDF()

      val wordCountDF: DataFrame = batchDF
        .select(explode(split(col("value"), ",")).as("word"))
        .groupBy(col("word"))
        .agg(count(col("word")).as("num"))

      wordCountDF.show()
    })

    // Start the streaming job and block until it is stopped or fails.
    ssc.start()
    ssc.awaitTermination()
    ssc.stop()

  }

}
