package com.shujia.stream

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates the two ways to drop from the DStream API down to the RDD API:
  *
  *  1. `foreachRDD` — an *output operation*: runs an arbitrary function over the
  *     RDD of each micro-batch (here: a word count, plus an RDD -> DataFrame ->
  *     SQL round trip). It returns nothing, so it terminates the pipeline.
  *  2. `transform`  — a *transformation*: rewrites each micro-batch RDD and must
  *     return a new RDD, yielding a new DStream that can be processed further.
  *
  * Reads comma-separated lines from a socket (`master:8888`) in 5-second batches.
  */
object Demo3DStreamToRDD {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[2]")
      // A single shuffle partition keeps demo output compact on a local run.
      .config("spark.sql.shuffle.partitions", 1)
      .appName("rdd")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val sc: SparkContext = spark.sparkContext

    // Micro-batch interval: one batch every 5 seconds.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Source: one DStream element per line received on the socket.
    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    /**
      * foreachRDD: gives direct access to the RDD of every micro-batch.
      * The function runs once per batch on the driver; RDD actions inside it
      * trigger distributed jobs.
      */
    linesDS.foreachRDD(rdd => {
      println("正在执行foreachRDD")

      // Per-batch word count with the RDD API.
      // FIX: the terminal action was commented out, so this chain was never
      // executed (RDDs are lazy) — restore `foreach` so the count actually runs.
      rdd
        .flatMap(_.split(","))
        .map((_, 1))
        .reduceByKey(_ + _)
        .foreach(println)

      /**
        * Convert the batch RDD into a DataFrame so it can be queried with SQL.
        */
      val df: DataFrame = rdd.toDF("line")

      // Register the batch as a temporary view and query it with SQL.
      df.createOrReplaceTempView("lines")

      // FIX: the view was registered but never queried (the original used the
      // DataFrame API instead) — run the equivalent word count as SQL so the
      // registration serves its stated purpose. Output schema is unchanged:
      // columns `word` and `c`.
      spark.sql(
        """
          |select word, count(1) as c
          |from (select explode(split(line, ',')) as word from lines) t
          |group by word
        """.stripMargin)
        .show()

    })

    /**
      * transform: converts each micro-batch RDD with RDD code and must return a
      * new RDD; the results form a new DStream for further stream processing.
      */
    val kvDS: DStream[(String, Int)] = linesDS.transform(rdd => {

      // Split each line into words and pair every word with an initial count.
      val kvRDD: RDD[(String, Int)] = rdd
        .flatMap(_.split(","))
        .map((_, 1))

      kvRDD
    })

    // Continue in the DStream API: aggregate counts per batch and print them.
    kvDS.reduceByKey(_ + _)
        .print()

    ssc.start()
    // Blocks until the context is stopped (or an error terminates the stream);
    // the explicit stop() below only runs after termination.
    ssc.awaitTermination()
    ssc.stop()

  }

}
