package zy.learn.demo.structuredstreaming.source

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.{OutputMode, Trigger}

object KafkaSource {

  /**
   * Demo: reads messages from a Kafka topic with Structured Streaming and
   * echoes the (string-cast) message values to the console, using the
   * continuous-processing trigger.
   */
  def main(args: Array[String]): Unit = {

    // Keep the shuffle partition count small for a local demo (default is 200).
    val sparkConf = new SparkConf().set("spark.sql.shuffle.partitions", "3")

    val spark = SparkSession.builder()
      .master("local[2]")
      .config(sparkConf)
      .appName("KafkaSource")
      .getOrCreate()

    // The Kafka source always produces a fixed schema:
    // key, value, topic, partition, offset, timestamp, timestampType
    val df = spark.readStream
      .format("kafka") // use the Kafka data source
      .option("kafka.bootstrap.servers", "co7-203:9092,co7-204:9092,co7-205:9092")
      .option("subscribe", "topic1") // multiple topics also work: "topic1,topic2"
      .load()
      // Project down to the payload; `value` arrives as binary, so cast it to
      // a string. (A former intermediate select("value", "timestamp") was
      // removed: its `timestamp` column was immediately dropped by this
      // projection anyway, so it was dead work.)
      .selectExpr("cast(value as string)")

    /*
     Shell commands to create the topic and feed it test data:
     kafka-topics.sh --create --bootstrap-server co7-203:9092 --topic topic1 --replication-factor 1 --partitions 1
     kafka-console-producer.sh --broker-list co7-203:9092 --sync --topic topic1
     */

    df.writeStream
      .outputMode(OutputMode.Update())
      .option("truncate", false) // print cell contents in full instead of truncating
      .format("console")
      .trigger(Trigger.Continuous(1000)) // continuous processing; checkpoint every 1000 ms
      .start()
      .awaitTermination()
  }
}
