package com.shujia.stream

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.{Duration, Durations, StreamingContext}

/**
  * Streaming word count that converts each DStream micro-batch to a DataFrame
  * and aggregates it with Spark SQL.
  *
  * Usage: Demo4DStoRDD [host] [port]
  *   host — socket source host (default: "master")
  *   port — socket source port (default: 8888)
  */
object Demo4DStoRDD {
  def main(args: Array[String]): Unit = {
    // Allow host/port to be overridden from the command line; keep the
    // original defaults for backward compatibility.
    val host: String = if (args.length > 0) args(0) else "master"
    val port: Int = if (args.length > 1) args(1).toInt else 8888

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[2]")
      .appName("stream")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Needed for rdd.toDF(...) inside foreachRDD below.
    import spark.implicits._

    val sc: SparkContext = spark.sparkContext

    // Batch interval: each micro-batch covers 5 seconds of received data.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream(host, port)

    /**
      * A DStream is, under the hood, a series of RDDs computed repeatedly —
      * one RDD per batch — so it can be processed as plain RDDs.
      *
      * foreachRDD acts like a loop that runs once per batch interval
      * (every 5 seconds here); `rdd` holds the data received in that batch.
      */
    linesDS.foreachRDD((rdd: RDD[String]) => {

      // An RDD can be converted to a DataFrame, which lets us use SQL.
      val linesDF: DataFrame = rdd.toDF("line")

      // Register a temporary view so the batch can be queried with SQL.
      linesDF.createOrReplaceTempView("lines")

      // Split each line on commas, explode into words, and count per word.
      val countDF: DataFrame = spark.sql(
        """
          |select word,count(1) as c from (
          |select explode(split(line,',')) as word from lines
          |) as a group by word
          |
        """.stripMargin)

      countDF.show()

    })

    ssc.start()
    // Blocks until the streaming context is terminated externally.
    ssc.awaitTermination()
    ssc.stop()

  }

}
