package com.example

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

object StructureStreamTest {

  /**
   * Entry point: consumes messages from the Kafka topic `lg_bus_info`,
   * parses each payload into a [[BusInfo]], and sinks the records through
   * a custom [[MysqlWriter]] (a ForeachWriter) in append mode.
   *
   * Blocks indefinitely on `awaitTermination()`; intended to be run as a
   * long-lived streaming job.
   */
  def main(args: Array[String]): Unit = {
    // 1. Build a local SparkSession using all available cores.
    val session: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("kafka-mysql")
      .getOrCreate()
    session.sparkContext.setLogLevel("WARN")

    import session.implicits._

    // 2. Declare the Kafka source: subscribe to topic "lg_bus_info"
    //    on broker node2:9092.
    val rawStream: DataFrame = session.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "node2:9092")
      .option("subscribe", "lg_bus_info")
      .load()

    // 3. Keep only the message payload, decoded from bytes to a string,
    //    then parse each line into a BusInfo record.
    //    (BusInfo.apply is assumed to parse the raw message — defined elsewhere.)
    val payload: Dataset[String] =
      rawStream.selectExpr("CAST(value AS STRING)").as[String]
    val records: Dataset[BusInfo] = payload.map(BusInfo(_))

    // 4. Sink: hand every appended row to the custom writer and block
    //    until the streaming query terminates.
    records.writeStream
      .foreach(new MysqlWriter)
      .outputMode("append")
      .start()
      .awaitTermination()
  }

}
