package com.shujia.stream

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
  * Structured Streaming word-count demo:
  * reads comma-separated lines from a socket, counts words with Spark SQL,
  * and writes the running counts to a MySQL table on every micro-batch.
  */
object Demo5StructuredStreaming {

  def main(args: Array[String]): Unit = {

    // Local session; 1 shuffle partition keeps the streaming
    // aggregation cheap for this single-machine demo.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[2]")
      .appName("ssc")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()


    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Socket source: each incoming line becomes one row in column "value".
    val df: DataFrame = spark
      .readStream // read the live data stream
      .format("socket")
      .option("host", "master")
      .option("port", 8888)
      .load()


    // Split each comma-separated line into one word per row.
    val wordsDF: DataFrame = df.select(explode(split($"value", ",")) as "word")


    /**
      * DSL version of the word count (kept for reference):
      */

    // count the words
    /*val countDF: DataFrame = wordsDF
      .groupBy($"word")
      .agg(count($"word"))*/

    /**
      * Running SQL on a streaming DataFrame.
      */

    wordsDF.createOrReplaceTempView("word")

    val countDF: DataFrame = spark.sql(
      """
        |select word,count(1) as c from word group by word
        |
      """.stripMargin)

    /**
      * outputMode
      * Update:   only rows changed since the last trigger are emitted
      * Complete: the full result table is emitted on every trigger
      * Append:   only newly appended rows; after an aggregation Append is
      *           not usable (without a watermark)
      */


    // print to the console (kept for reference)
    /*countDF
      .writeStream
      .format("console") // console sink
      .outputMode(OutputMode.Update())
      .start() //start the query
      .awaitTermination() //block until the query stops*/


    // persist the result to MySQL

    countDF
      .writeStream
      .outputMode(OutputMode.Complete())
      // Checkpointing lets the query recover its aggregation state after
      // a restart instead of recounting from scratch.
      .option("checkpointLocation", "data/checkpoint/t_count")
      // Explicit parameter types avoid the Scala 2.12 foreachBatch
      // overload ambiguity (SPARK-26132); `batchDF` avoids shadowing
      // the outer `df` DataFrame.
      .foreachBatch((batchDF: DataFrame, batchId: Long) => {

        // Complete mode re-emits the whole result table each trigger,
        // so Overwrite keeps the MySQL table in sync with latest counts.
        batchDF.write
          .format("jdbc")
          .mode(SaveMode.Overwrite)
          .option("url", "jdbc:mysql://master:3306")
          .option("dbtable", "student.t_count")
          .option("user", "root")
          .option("password", "123456")
          .save()

      })
      .start() // start the query
      .awaitTermination() // block until the query stops


  }

}
