package com.shujia.spark.stream

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * DStream-on-DataFrame word count with externally persisted state.
  *
  * Each 5-second micro-batch:
  *   1. loads the accumulated word counts from `data/ds_on_df` (CSV),
  *   2. counts words in the current batch via Spark SQL,
  *   3. merges both and writes the new totals to a temp path,
  *   4. swaps the temp path into place as the new state.
  */
object Demo5DStreamOnDF {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("df")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val sc: SparkContext = spark.sparkContext

    // Create the Spark Streaming context with a 5-second batch interval.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // State locations: current accumulated counts, plus a temp dir because
    // Spark cannot overwrite a path while a DataFrame is still reading it.
    val statePath = "data/ds_on_df"
    val tempPath = "data/ds_on_df_temp"

    // Hoisted out of foreachRDD (loop-invariant). Use Spark's own Hadoop
    // configuration so we resolve the same filesystem Spark reads/writes.
    val fileSystem: FileSystem = FileSystem.get(sc.hadoopConfiguration)

    // 1. Read lines from the socket source.
    val lineDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    /**
      * foreachRDD: process each micro-batch as an RDD/DataFrame; returns Unit.
      */
    lineDS.foreachRDD(rdd => {

      // Load the previous totals. On the very first batch the state path does
      // not exist yet, so fall back to an empty DataFrame with the same schema
      // instead of letting the CSV reader throw AnalysisException.
      val lastDF: DataFrame =
        if (fileSystem.exists(new Path(statePath))) {
          spark
            .read
            .format("csv")
            .schema("word STRING,c LONG")
            .load(statePath)
        } else {
          Seq.empty[(String, Long)].toDF("word", "c")
        }

      // Expose the current batch as a SQL view.
      val df: DataFrame = rdd.toDF("line")
      df.createOrReplaceTempView("words")

      // Word counts for the current batch only.
      val countDF: DataFrame = spark.sql(
        """
          |select word,count(1)as c from (
          |select explode(split(line,',')) as word from words
          |)  as a
          |group by word
          |
        """.stripMargin)

      // Merge the current batch into the running totals.
      val resultDF: DataFrame = lastDF.union(countDF)
        .groupBy($"word")
        .agg(sum($"c") as "c")

      // Write to a temp path first: overwriting the state path directly would
      // delete the files lastDF still depends on (lazy evaluation).
      resultDF
        .write
        .format("csv")
        .mode(SaveMode.Overwrite)
        .save(tempPath)

      // Swap the temp output into place as the new state.
      if (fileSystem.exists(new Path(statePath))) {
        fileSystem.delete(new Path(statePath), true)
      }
      fileSystem.rename(new Path(tempPath), new Path(statePath))
    })

    ssc.start()
    // Blocks until the context is stopped; a trailing ssc.stop() after this
    // call is redundant and has been removed.
    ssc.awaitTermination()
  }
}
