package com.shujia.streaming

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}

object Demo4DStreamToRDD {
  /**
   * Demonstrates bridging Spark Streaming and Spark SQL: each micro-batch
   * RDD is converted to a DataFrame so the batch can be analysed with SQL —
   * here, a word count over a 15-second sliding window.
   *
   * @param args optional overrides: args(0) = socket host (default "master"),
   *             args(1) = socket port (default 12345). Defaults match the
   *             previously hard-coded values, so existing launches still work.
   */
  def main(args: Array[String]): Unit = {
    // Allow the socket endpoint to be overridden from the command line.
    val host: String = if (args.length > 0) args(0) else "master"
    val port: Int = if (args.length > 1) args(1).toInt else 12345

    /**
     * Build the SparkSession. local[2] gives one core to the receiver and
     * one to processing — a receiver-based stream needs at least two.
     */
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("DS2RDD演示")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Needed for rdd.toDF(...) below.
    import sparkSession.implicits._

    val sparkContext: SparkContext = sparkSession.sparkContext
    // Micro-batch interval: a new RDD of received lines every 5 seconds.
    val streamingContext = new StreamingContext(sparkContext, Durations.seconds(5))

    /**
     * Read a DStream of text lines from a TCP socket,
     * e.g. lines like: hello hello world java hello hadoop
     */
    val linesDS: ReceiverInputDStream[String] = streamingContext.socketTextStream(host, port)

    // Sliding window: each batch sees the last 15 seconds of data, sliding every 5 s.
    val new_linesDS: DStream[String] = linesDS.window(Durations.seconds(15), Durations.seconds(5))

    /**
     * A DStream is backed by RDDs: each batch interval the received data is
     * wrapped in a fresh RDD, so foreachRDD lets us process every batch with
     * the regular RDD / DataFrame / SQL APIs.
     */
    new_linesDS.foreachRDD((rdd: RDD[String]) => {
      println("===================================================")
      println("正在处理当前批次的数据.....")
      println("===================================================")

      /**
       * Because the batch is an RDD it can be converted to a DataFrame,
       * registered as a temp view, and queried with SQL: split each line
       * into words, explode, then count per word.
       */
      val linesDF: DataFrame = rdd.toDF("line")
      linesDF.createOrReplaceTempView("words")
      sparkSession.sql(
        """
          |select
          |t1.word as word,
          |count(1) as number
          |from
          |(select
          | explode(split(line,' ')) as word
          |from
          |words) t1
          |group by t1.word
          |""".stripMargin).show()
    })

    streamingContext.start()
    // Blocks until the streaming context is stopped (externally or by error).
    // NOTE(review): the former streamingContext.stop() after this call was
    // unreachable in normal operation — by the time awaitTermination returns,
    // the context has already been stopped — so it has been removed.
    streamingContext.awaitTermination()
  }
}
