package org.zjt.spark.dstream

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming._

/**
  * DESC Demonstrates combined use of DataFrame and DStream APIs.
  *
  * @author
  * @create 2017-05-15 18:37
  **/
object DstreamTest {

  /**
    * Demonstrates DStream operations (`transform`, `updateStateByKey`, `window`)
    * and running SQL over each streaming batch by converting RDDs to DataFrames.
    *
    * Reads whitespace-separated words from a socket, keeps a running count per
    * word across batches, and prints both the raw pairs and a SQL aggregation.
    */
  def main(args: Array[String]): Unit = {

    // allowMultipleContexts: this demo creates both a StreamingContext and a
    // SparkSession-backed context inside foreachRDD.
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("NetworkWordCount")
      .set("spark.driver.allowMultipleContexts", "true")
    val ssc = new StreamingContext(conf, Seconds(2))

    /*
    val wordCounts = ssc.socketTextStream("localhost", 9002).flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_)
    wordCounts.print()
    */

    // updateStateByKey is stateful: Spark requires a checkpoint directory so the
    // per-key state survives across batches / driver restarts.
    ssc.checkpoint("D:\\Idea workspace\\scala-demo\\src\\main\\resource\\ckeckpoint")

    val wordCounts = ssc.socketTextStream("192.168.83.133", 9002)
      .flatMap(_.split(" "))
      .map((_, 1))

      // transform: apply an arbitrary RDD-level operation to every batch RDD;
      // the resulting RDDs are re-wrapped as a DStream.
      .transform(rdd => rdd.map { case (word, cnt) => (word.toUpperCase, cnt) })

      // updateStateByKey groups this batch's values by key; `newValues` holds the
      // batch's counts for one key, `state` the previously stored running total.
      // BUG FIX: the original returned the old state unchanged whenever it was
      // non-empty (`if (b == None) Some(a.length) else b`), so counts never
      // accumulated past the first batch. Sum the new values into the state.
      .updateStateByKey((newValues: Seq[Int], state: Option[Int]) =>
        Some(newValues.sum + state.getOrElse(0))
      )
      .window(Seconds(30), Seconds(10))

    // DStream -> RDD: iterate each windowed batch and print its contents.
    wordCounts.foreachRDD(rdd => println(rdd.collect().mkString(",")))

    // Run SQL over each batch (the pattern used to persist results to a database).
    wordCounts.foreachRDD { rdd =>

      // Reuse (or lazily create) a SparkSession sharing the streaming conf.
      val spark = SparkSession.builder.config(rdd.sparkContext.getConf).getOrCreate()

      // Implicit conversions needed for rdd.toDF.
      import spark.implicits._
      val wordsDataFrame = rdd.toDF("word", "cnt")

      // Expose the batch as a temporary view so it can be queried with SQL.
      wordsDataFrame.createOrReplaceTempView("words")

      // Aggregate per word via SQL and print the result.
      val wordCountsDataFrame =
        spark.sql("select word,sum(cnt) as cnt , count(*) as total from words group by word")
      wordCountsDataFrame.show()
    }

    ssc.start()
    // Blocks until the streaming context is stopped (e.g. Ctrl-C / error),
    // so stop() below only runs during shutdown.
    ssc.awaitTermination()
    ssc.stop()
  }
}
