package com.study.spark.scala.structured_streaming

import org.apache.spark.sql.SparkSession

/**
  * Structured Streaming word count over Kafka.
  *
  * Reads lines from Kafka topic `topic-in`, splits them into words, maintains
  * a running count per word, and publishes the counts to Kafka topic
  * `topic-out` as (word -> count) key/value records.
  *
  * @author stephen
  * @create 2019-03-02 16:09
  * @since 1.0.0
  */
object StructuredKafka010WordCount {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("StructuredKafka010WordCount")
      .getOrCreate()

    import spark.implicits._

    // Source: Kafka record values arrive as binary; cast to UTF-8 string
    // and expose the stream as a typed Dataset[String] of raw lines.
    val lines = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")
      .option("subscribe", "topic-in")
      .load()
      .selectExpr("CAST(value AS STRING)")
      .as[String]

    // Split on single spaces (consecutive spaces yield empty-string "words";
    // acceptable for a demo). groupBy on the implicit `value` column of the
    // Dataset[String] produced by flatMap.
    val words = lines.flatMap(_.split(" "))
    val wordCounts = words.groupBy("value").count()

    // Sink: the Kafka writer only emits the `key` and `value` columns.
    // Without an explicit projection the `count` column would be silently
    // dropped and only the bare word written out — so fold the count into
    // `value` and use the word as the record key.
    val query = wordCounts
      .selectExpr("CAST(value AS STRING) AS key", "CAST(count AS STRING) AS value")
      .writeStream
      .format("kafka")
      // "complete" re-emits the entire counts table on every trigger,
      // which is required for a streaming aggregation without watermark.
      .outputMode("complete")
      // Kafka sink is exactly-once via checkpointing; location is mandatory.
      .option("checkpointLocation", "/Users/stephen/Documents/03code/java-demo/bigdata-study/study-spark/target/chk")
      .option("kafka.bootstrap.servers", "localhost:9092")
      .option("topic", "topic-out")
      .start()

    // Block the driver until the streaming query terminates (or fails).
    query.awaitTermination()
  }
}
