package com.shujia.stream

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream

object Demo4StreamOnRDDWC {

  /**
    * Streaming word count that keeps a cumulative total across batches by
    * persisting counts to a CSV directory and merging each new batch into it.
    *
    * Per 5-second batch: count words in the batch, union with the previously
    * saved totals (if any), re-aggregate, and atomically swap the result
    * directory via a staging directory + rename.
    */
  def main(args: Array[String]): Unit = {
    // local[2]: one core for the socket receiver, one for processing.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[2]")
      .config("spark.sql.shuffle.partitions", 1)
      .appName("rdd")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val sc: SparkContext = spark.sparkContext

    // 5-second micro-batch interval.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Read raw lines from the socket source.
    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    // Result directory and staging directory. The staging directory is
    // required because Spark cannot read and write the same path in one job.
    val path = "Spark/data/stream_count"
    val tempPath = "Spark/data/stream_count_temp"

    // foreachRDD closures run on the driver, so one FileSystem handle can be
    // created here and reused for every batch instead of per batch.
    val fileSystem: FileSystem = FileSystem.get(new Configuration())

    linesDS.foreachRDD(rdd => {

      val df: DataFrame = rdd.toDF("lines")

      // Word counts for the current batch only (comma-separated words).
      val countDF: DataFrame = df
        .select(explode(split($"lines", ",")) as "word")
        .groupBy($"word")
        .agg(count($"word") as "c")

      // On the very first batch the result directory does not exist yet, so
      // check before trying to read previous totals.
      //
      // If previous results exist: merge them with the current batch and save.
      // If not: save the current batch's counts directly.
      if (fileSystem.exists(new Path(path))) {
        // Load the cumulative totals written by the previous batch.
        val beDF: DataFrame = spark
          .read
          .format("csv")
          .schema("word STRING,c LONG")
          .load(path)

        // Merge previous totals with the current batch's counts.
        val newDF: DataFrame = beDF
          .union(countDF)
          .groupBy($"word")
          .agg(sum($"c") as "cc")
          .select($"word", $"cc" as "c")

        // Spark cannot read from and write to the same directory within a
        // single job, so write to the staging directory first...
        newDF
          .write
          .mode(SaveMode.Overwrite)
          .format("csv")
          .save(tempPath)

        // ...then swap the staging directory into place.
        fileSystem.delete(new Path(path), true)
        fileSystem.rename(new Path(tempPath), new Path(path))

      } else {
        // First batch: no previous totals, save directly.
        countDF
          .write
          .mode(SaveMode.Overwrite)
          .format("csv")
          .save(path)
      }

    })

    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }

}
