package com.shujia.spark.stream

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe

import java.sql.{Connection, Driver, DriverManager, PreparedStatement}

/**
 * Spark Streaming job: consumes Hadoop NameNode log lines from Kafka,
 * maintains a running count per log level (INFO/WARN/ERROR/DEBUG/FATAL)
 * with updateStateByKey, and upserts the counts into MySQL every batch.
 */
object Demo8SparkStreamOnKafka {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[2]")
      .appName("card")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    // 5-second micro-batch interval
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // updateStateByKey (used below) is a stateful operator and requires checkpointing
    ssc.checkpoint("data/checkpoint")

    /**
     * auto.offset.reset semantics:
     * earliest
     *   If a partition has a committed offset, resume from it; otherwise consume from the beginning.
     * latest (default)
     *   If a partition has a committed offset, resume from it; otherwise consume only newly produced data.
     * none
     *   Resume from committed offsets; throw if any partition lacks a committed offset.
     */
    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "master:9092,node1:9092,node2:9092", // Kafka broker list
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "Demo8SparkStreamOnKafka", // consumer group
      "auto.offset.reset" -> "earliest", // where to start reading when no committed offset exists
      "enable.auto.commit" -> (false: java.lang.Boolean) // whether to auto-commit consumer offsets
    )

    /**
     * About offset auto-commit (the consumer's read position):
     * enable.auto.commit = false means offsets are not committed automatically, so every
     * restart re-reads from the position dictated by auto.offset.reset / checkpoint state.
     * auto.commit.interval.ms = 5000 is the commit interval when auto-commit is on.
     * By default offsets are committed to Kafka's internal topic (__consumer_offsets).
     */
    val topics: Array[String] = Array("hadoop-namenode-log")

    // Create the direct stream from Kafka
    val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    // Extract the record value (the raw log line)
    val linesDS: DStream[String] = stream.map(record => record.value())

    // Drop malformed lines that do not have enough fields
    val filterDS: DStream[String] = linesDS.filter(_.split(" ").length > 3)

    // The third space-separated field is the log level
    val logTypeDS: DStream[String] = filterDS.map(log => log.split(" ")(2))

    // Whitelist of valid log levels.
    // BUG FIX: "WHEN" was a typo for "WARN" — WARN records were silently dropped.
    val logTypes: List[String] = List("INFO", "ERROR", "WARN", "DEBUG", "FATAL")
    val filterLogTypeDS: DStream[String] = logTypeDS.filter(logType => logTypes.contains(logType))

    /**
     * Count occurrences of each log level.
     */
    val kvDS: DStream[(String, Int)] = filterLogTypeDS.map((_, 1))

    // updateStateByKey is stateful: new batch counts are folded into the running total.
    val countDS: DStream[(String, Int)] = kvDS
      .updateStateByKey((seq: Seq[Int], state: Option[Int]) => Option(seq.sum + state.getOrElse(0)))

    /**
     * Persist the running counts to MySQL, one connection per partition per batch.
     */
    countDS.foreachRDD(rdd => {
      rdd.foreachPartition(iter => {
        // Open a JDBC connection on the executor (drivers/connections are not serializable)
        Class.forName("com.mysql.jdbc.Driver")
        val con: Connection = DriverManager.getConnection("jdbc:mysql://master:3306/bigdata", "root", "123456")
        try {
          // REPLACE INTO: insert if absent, overwrite if the key already exists
          val stat: PreparedStatement = con.prepareStatement("replace into log_type_num(log_type,num) values(?,?)")
          try {
            // Write every (logType, count) pair in this partition
            iter.foreach {
              case (logType: String, num: Int) =>
                stat.setString(1, logType)
                stat.setInt(2, num)
                stat.execute()
            }
          } finally {
            // BUG FIX: close in finally so a failed insert cannot leak the statement
            stat.close()
          }
        } finally {
          // BUG FIX: close in finally so a failed insert cannot leak the connection
          con.close()
        }
      })
    })

    ssc.start()
    ssc.awaitTermination()
    ssc.stop()

  }

}
