package com.haozhen.pgproject

/**
  * @author haozhen
  * @email haozh@ync1.com
  * @date 2021/2/7  13:11
  */
/**
  * Structured Streaming job: consumes bus GPS messages from Kafka topic
  * `gp_app1`, parses each record into a [[BusInfo]], and appends valid
  * rows to a relational database through the custom [[JdbcWriter]] sink.
  */
object RealTimeProcess {

  def main(args: Array[String]): Unit = {
    import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

    // Run as "root" against HDFS regardless of the local OS user.
    // NOTE: original code called System.setProperties(String, String),
    // which does not exist (setProperties takes a java.util.Properties);
    // setProperty is the correct single-key API.
    System.setProperty("HADOOP_USER_NAME", "root")

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName(RealTimeProcess.getClass.getName)
      .getOrCreate()
    spark.sparkContext.setLogLevel("WARN")
    import spark.implicits._

    // Source: Kafka. Each row exposes binary `key`/`value` columns plus
    // topic/partition/offset metadata.
    val kafkaDf: DataFrame = spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers", "test3:9092,test2:9092")
      .option("subscribe", "gp_app1")
      .load()

    // Kafka delivers `value` as binary; CAST it to a string column so the
    // frame can be viewed as Dataset[String].
    // (Fixes the original malformed expression "case(value as string".)
    val df1: DataFrame = kafkaDf.selectExpr("CAST(value AS STRING)")

    val ds1: Dataset[String] = df1.as[String]

    // Parse each raw line; BusInfo(_) is assumed to return null on bad
    // input, which the filter drops — TODO confirm against BusInfo.apply.
    val ds2: Dataset[BusInfo] = ds1.map(BusInfo(_)).filter(_ != null)

    // Sink: append-only writes through the per-partition JDBC writer;
    // block the driver until the streaming query terminates.
    ds2.writeStream
      .foreach(new JdbcWriter)
      .outputMode("append")
      .start()
      .awaitTermination()

  }
}
