package com.shujia.spark.streaming

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.{Duration, Durations, StreamingContext}

object Demo4DStreamTORDDAndDF {

  /**
   * Demo: inside `foreachRDD`, each DStream micro-batch can be processed
   * with the plain RDD API or converted to a DataFrame for Spark SQL.
   */
  def main(args: Array[String]): Unit = {
    /**
     * Create the SparkSession (local mode with 2 cores; shuffle partitions
     * reduced to 1 so this tiny demo does not launch 200 shuffle tasks).
     */
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[2]")
      .appName("ds")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    // Required for rdd.toDF(...) below.
    import spark.implicits._

    val sc: SparkContext = spark.sparkContext

    // Batch interval: every 5 seconds the received data becomes one RDD.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    /**
     * Read lines from a socket as a DStream.
     * NOTE(review): expects a text server on host "master" port 8888
     * (e.g. `nc -lk 8888`) — confirm the hostname resolves in your env.
     */
    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    /**
     * A DStream is, under the hood, a sequence of RDDs — one per batch
     * interval, each holding that interval's data. Inside foreachRDD we
     * use ordinary RDD / DataFrame / SQL code, but stateful DStream
     * operators (e.g. updateStateByKey) are no longer available.
     */
    linesDS.foreachRDD((rdd: RDD[String]) => {
      println("processing batch")

      /**
       * Word count via the RDD API.
       * FIX: the original chain ended in transformations only (the
       * terminal foreach was commented out), so Spark's lazy evaluation
       * meant it never ran; the action restores execution.
       * NOTE(review): this path splits on "<" while the SQL below splits
       * on "," — presumably intentional for the demo input; verify.
       */
      rdd
        .flatMap(_.split("<"))
        .map((_, 1))
        .reduceByKey(_ + _)
        .foreach(println)

      /**
       * The same batch as a DataFrame: register a temp view so the
       * word count can also be expressed in SQL.
       */
      val linesDF: DataFrame = rdd.toDF("line")
      linesDF.createOrReplaceTempView("lines")

      val countDF: DataFrame = spark.sql(
        """
          |select word,count(1) as c from (
          |select explode(split(line,',')) as word
          |from lines
          |) as a
          |group by word
          |
          |""".stripMargin)

      countDF.show()

    })

    // Start receiving; awaitTermination blocks until the context is stopped,
    // so the stop() below only runs after an external shutdown request.
    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }

}
