package com.shujia.spark.streaming

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}

object Demo4SSCToMysql {

  /**
    * Spark Streaming job: reads comma-separated words from a socket
    * ("master":8888), maintains a cumulative word count across batches with
    * `updateStateByKey`, and writes each batch's accumulated counts to MySQL.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName("mysql")
      .getOrCreate()

    // Required for rdd.toDF(...) inside foreachRDD below.
    import spark.implicits._

    /**
      * Create the StreamingContext, specifying the batch interval —
      * how often a micro-batch is computed.
      */
    val ssc = new StreamingContext(spark.sparkContext, Durations.seconds(5))

    // Checkpointing is mandatory for stateful operations (updateStateByKey):
    // Spark persists the running state here for fault recovery.
    ssc.checkpoint("data/checkpoint2")

    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    // Split each line on commas and pair every word with an initial count of 1.
    val wordsDS: DStream[(String, Int)] = linesDS.flatMap(_.split(",")).map((_, 1))

    // Per-batch count for each word.
    val wordCount: DStream[(String, Int)] = wordsDS.reduceByKey(_ + _)

    /**
      * State-update function.
      *
      * @param seq   counts for this key in the current batch
      * @param state accumulated count for this key from previous batches
      * @return the new accumulated count (never None, so keys are retained)
      */
    def updateFun(seq: Seq[Int], state: Option[Int]): Option[Int] =
      Some(seq.sum + state.getOrElse(0))

    val wc: DStream[(String, Int)] = wordCount.updateStateByKey(updateFun)

    wc.foreachRDD { rdd =>

      val countDF: DataFrame = rdd.toDF("word", "count")

      countDF.show()

      // Persist the cumulative counts to MySQL. SaveMode.Overwrite replaces
      // the table contents each batch with the latest accumulated state.
      countDF
        .write
        .format("jdbc")
        .mode(SaveMode.Overwrite)
        .option("url", "jdbc:mysql://master:3306?useUnicode=true&characterEncoding=utf-8")
        .option("dbtable", "student.wordcount")
        .option("user", "root")
        .option("password", "123456")
        .save()

    }

    ssc.start()
    // Blocks until the context is stopped externally (e.g. SIGTERM); any code
    // placed after this call would be unreachable during normal operation.
    ssc.awaitTermination()

  }

}
