package com.shujia.streeam

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.{SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

/**
  * Demo: a hand-rolled equivalent of `updateStateByKey`.
  *
  * Reads comma-separated student records from a socket, counts students per
  * class in each 5-second micro-batch, and keeps a running total across
  * batches by persisting the aggregate as parquet under `spark/data/tmp`
  * and merging it back in on the next batch.
  */
object Demo3DSToRDDtoDF {
  def main(args: Array[String]): Unit = {
    // local[2]: at least two threads are required — one is pinned by the
    // socket receiver, the other runs batch processing.
    val conf = new SparkConf().setMaster("local[2]").setAppName("Demo3DSToRDDtoDF")
    // Keep shuffle parallelism low for a local demo.
    conf.set("spark.sql.shuffle.partitions", "2")
    val sc = new SparkContext(conf)

    val sqlContext = new SQLContext(sc)

    import sqlContext.implicits._

    /**
      * Create the streaming context. The batch interval (5 seconds) controls
      * how often a new micro-batch is produced.
      */
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    /**
      * Read a DStream from a socket. Feed it with:
      *   nc -lk 8888
      * Each line is expected to be: id,name,age,gender,clazz
      */
    val studentDS = ssc.socketTextStream("node1", 8888)

    /**
      * foreachRDD: DStream ---> RDD. The closure runs on the driver once per
      * batch. Kept here (commented out) as a save-to-file example; the empty
      * no-op output operation the original registered has been removed.
      */
    //studentDS.foreachRDD(rdd => {
    //  val time = System.currentTimeMillis()
    //  rdd.saveAsTextFile("spark/data/ds" + time)
    //})

    /**
      * Hand-rolled implementation of updateStateByKey: the running per-class
      * count is checkpointed to parquet between batches.
      */

    // Driver side: executed exactly once, while the streaming graph is built.
    println("asdasd")

    studentDS.foreachRDD(rdd => {

      // Driver side: executed once per batch.
      println("====")

      /**
        * Does a state snapshot from a previous batch exist?
        */
      val fs = FileSystem.get(new Configuration())
      val hasState = fs.exists(new Path("spark/data/tmp"))

      /**
        * RDD ---> DataFrame.
        * Malformed lines (fewer than 5 comma-separated fields, or a
        * non-numeric age) are dropped instead of crashing the whole job
        * with ArrayIndexOutOfBounds / NumberFormatException.
        */
      val df = rdd
        .map(line => line.split(","))
        .filter(f => f.length >= 5 && f(2).nonEmpty && f(2).forall(_.isDigit))
        .map(f => (f(0), f(1), f(2).toInt, f(3), f(4)))
        .toDF("id", "name", "age", "gender", "clazz")

      df.registerTempTable("student")

      // Per-class count for the current batch only; reassigned below if a
      // previous state snapshot has to be merged in.
      var currDF = sqlContext.sql("select clazz,count(1) c1 from student group by clazz")

      /**
        * Merge the previous running totals with this batch's counts.
        */
      if (hasState) {
        // Running totals saved by the previous batch.
        val lastDF = sqlContext.read.parquet("spark/data/tmp")

        val currRDD = currDF.rdd.map(row => {
          val clazz = row.getAs[String]("clazz")
          val c = row.getAs[Long]("c1")
          (clazz, c)
        })

        val lastRDD = lastDF.rdd.map(row => {
          val clazz = row.getAs[String]("clazz")
          val c = row.getAs[Long]("c1")
          (clazz, c)
        })

        // Stack old totals under new counts, then sum per class.
        currRDD.union(lastRDD).toDF("clazz", "coun").registerTempTable("student_tmp")

        currDF = sqlContext.sql("select clazz,sum(coun) as c1 from student_tmp group by clazz")
      }

      /**
        * SaveMode.Overwrite deletes the target directory first, so we cannot
        * write directly onto spark/data/tmp — we are still lazily reading the
        * old state from it in the same batch. Write to a scratch path first,
        * then swap it in below.
        */
      currDF.write.mode(SaveMode.Overwrite).parquet("spark/data/tmp1")

      currDF.show()

      // Replace the old state snapshot with the freshly written one.
      fs.delete(new Path("spark/data/tmp"), true)
      fs.rename(new Path("spark/data/tmp1"), new Path("spark/data/tmp"))

    })

    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }
}
