package zy.learn.demo.structuredstreaming.source

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.OutputMode

/**
 * Batch-mode Kafka workflow demo: reads a bounded slice of a Kafka topic with
 * `spark.read` (NOT `readStream`), splits each record's value into words,
 * aggregates word counts via Spark SQL, and prints the result to the console.
 */
object KafkaSourceBatch {
  def main(args: Array[String]): Unit = {

    // Keep shuffle partitions small; the default (200) is overkill for a local demo.
    val sparkConf = new SparkConf().set("spark.sql.shuffle.partitions", "3")

    val spark = SparkSession.builder()
      .master("local[2]")
      .config(sparkConf)
      .appName("KafkaSource Batch聚合")
      .getOrCreate()

    try {
      import spark.implicits._

      /*
       Topic setup / test-data commands:
       kafka-topics.sh --create --bootstrap-server co7-203:9092 --topic topic1 --replication-factor 1 --partitions 1
       kafka-console-producer.sh --broker-list co7-203:9092 --sync --topic topic1
       */

      // Batch read: offsets are bounded by startingOffsets/endingOffsets.
      // Here partition 0 of topic1 starts at offset 10 and reads to the latest offset.
      val words = spark.read // use read for batch mode, not readStream
        .format("kafka") // Kafka data source
        .option("kafka.bootstrap.servers", "co7-203:9092,co7-204:9092,co7-205:9092")
        .option("subscribe", "topic1") // multiple topics also possible: "topic1,topic2"
//        .option("startingOffsets", "earliest")  // default setting
        .option("startingOffsets", """ {"topic1":{"0":10}} """)
        .option("endingOffsets", "latest") // default setting
        .load() // side-effecting 0-arity call: keep the parentheses
        .selectExpr("cast(value as string)") // Kafka values arrive as bytes; cast to string
        .as[String]
        .flatMap(_.split(" "))

      words.createOrReplaceTempView("table1")

      val resultDF = spark.sql(
        """
          | select value, count(1) as cnt
          |   from table1
          |  group by value
          |""".stripMargin)

      // Batch writes do not take an OutputMode (that concept is streaming-only).
      resultDF.write
        .option("truncate", false) // print cell contents in full, untruncated
        .format("console")
//        .save("E:\\MyGit\\spark-project\\data\\out")
        .save()
    } finally {
      // Fix: the original never stopped the SparkSession, leaking the local
      // Spark context and its threads. Always release it, even on failure.
      spark.stop()
    }
  }
}
