package com.shujia.streaming

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}

object Demo4DStream2RDD {

  /**
   * Demonstrates the relationship between a DStream and its underlying RDDs:
   * `foreachRDD` exposes each micro-batch as an RDD, which can then be
   * converted to a DataFrame and queried with Spark SQL.
   *
   * Reads lines from a socket ("master":10086), applies a 10s window sliding
   * every 5s, and prints a word count for each windowed batch.
   */
  def main(args: Array[String]): Unit = {
    // Entry point for the DataFrame / SQL API.
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("rdd与DStream的关系")
      .config("spark.sql.shuffle.partitions", "1")
      .getOrCreate()
    // Needed for rdd.toDF and the $"col" column syntax below.
    import sparkSession.implicits._

    // Entry point for the RDD API (shared with the session).
    val sparkContext: SparkContext = sparkSession.sparkContext

    // Entry point for the DStream API: 5-second micro-batches.
    val streamingContext = new StreamingContext(sparkContext, Durations.seconds(5))

    val infoDS: ReceiverInputDStream[String] = streamingContext.socketTextStream("master", 10086)

    // For a non key-value DStream, window() can be called directly:
    // window length 10s, slide interval 5s.
    val new_infoDS: DStream[String] = infoDS.window(Durations.seconds(10), Durations.seconds(5))

    /**
     * foreachRDD: use RDD-level operations on each batch of the DStream.
     * Limitation: it returns Unit, so it cannot produce a new DStream
     * (use transform() when a resulting DStream is needed).
     */
    new_infoDS.foreachRDD((rdd: RDD[String]) => {
      println("------------------------------")
      // Guard against empty windows: skips registering the view and
      // launching a SQL job when no data arrived in the last window.
      if (!rdd.isEmpty()) {
        // An RDD[String] converts to a single-column DataFrame named
        // "value"; rename it to "info" for the SQL query below.
        val df1: DataFrame = rdd.toDF.select($"value" as "info")
        df1.createOrReplaceTempView("words")
        // Word count in SQL: split each line on spaces, explode to one
        // word per row, then group and count.
        val resDF: DataFrame = sparkSession.sql(
          """
            |select
            |t1.wds as word,
            |count(1) as counts
            |from
            |(
            |select
            |explode(split(info,' ')) as  wds
            |from words) t1
            |group by t1.wds
            |""".stripMargin)
        resDF.show()
      }
    })

    streamingContext.start()
    streamingContext.awaitTermination()
    // NOTE: unreachable in normal operation (awaitTermination blocks until
    // the context is stopped elsewhere); kept for explicit cleanup intent.
    streamingContext.stop()

  }
}
