package cn.lg.monitor

import cn.lg.monitor.MySqlWriter.createConn
import cn.lg.monitor.{HbaseWriter, RedisWriter}
import com.lg.bean.BusInfo
import org.apache.spark.sql.{Column, DataFrame, Dataset, SparkSession}

import java.sql.DriverManager

/**
  * Reads bus-info records from Kafka using Spark Structured Streaming.
  */
object RealTimeProcess {

  /**
    * Opens a JDBC connection to the MySQL instance backing the monitor data.
    *
    * Fix: the original URL contained a stray space after the '?'
    * ("test? useSSL=..."), which breaks query-string parsing so
    * `useSSL`/`characterEncoding` were never applied.
    *
    * NOTE(review): credentials are hard-coded; move them to configuration.
    *
    * @return an open [[java.sql.Connection]]; the caller is responsible for closing it
    */
  def createConn(): java.sql.Connection = {
    Class.forName("com.mysql.jdbc.Driver")
    DriverManager.getConnection(
      "jdbc:mysql://linux123:3306/test?useSSL=false&characterEncoding=utf8",
      "root",
      "12345678")
  }

  /**
    * Entry point: builds a local SparkSession, subscribes to the
    * `lg_bus_info` Kafka topic, parses each record into a [[BusInfo]],
    * and streams the result into MySQL via [[MySqlWriter]].
    */
  def main(args: Array[String]): Unit = {
    // Required so the local job can write to HDFS as the "root" user.
    System.setProperty("HADOOP_USER_NAME", "root")

    // 1. Obtain a SparkSession.
    val spark: SparkSession = SparkSession.builder()
      .master("local[2]")
      .appName(RealTimeProcess.getClass.getName)
      .getOrCreate()
    spark.sparkContext.setLogLevel("WARN")
    import spark.implicits._

    // 2. Define the Kafka streaming source.
    val kafkaDf: DataFrame = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "linux121:9092,linux122:9092")
      .option("subscribe", "lg_bus_info")
      .load()

    // 3. Process the data: keep only the message value as a string.
    val kafkaValDf: DataFrame = kafkaDf.selectExpr("CAST(value AS STRING)")
    // Convert to a typed Dataset for per-record parsing.
    val kafkaDs: Dataset[String] = kafkaValDf.as[String]
    // Parse each record into a BusInfo case class so downstream code can
    // access fields by name; BusInfo(_) yields null on malformed input,
    // which the filter drops.
    val busInfoDs: Dataset[BusInfo] = kafkaDs.map(BusInfo(_)).filter(_ != null)

    // Write the parsed records (lat/long data) to MySQL.
    // (Original comment said "Redis", but the sink here is MySqlWriter.)
    busInfoDs.writeStream
      .foreach(new MySqlWriter)
      .outputMode("append")
      .start()

    // Block until any active streaming query terminates.
    spark.streams.awaitAnyTermination()
  }
}
