package com.itcast.spark.kafka

import java.lang
import java.sql.{DriverManager, ResultSet}

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

/**
 * DESC: Spark Streaming consuming Kafka with the 0-10 direct (low-level) API,
 * i.e. offsets are managed manually rather than auto-committed by Kafka.
 * 1 - Prepare the StreamingContext with a 5-second batch interval
 * 2 - Read the Kafka topic via KafkaUtils.createDirectStream with the required configuration
 * 3 - Implement the Kafka + Spark Streaming WordCount (consider how the Kafka record structure is parsed)
 * 4 - Print the result
 * 5 - ssc.start
 * 6 - ssc.awaitTermination
 * 7 - ssc.stop(true, true)
 */
object _0511KafkaSparkStreaming010OffsetToMySQLErrorChange {

  /**
   * State-update function for `updateStateByKey`: folds the counts observed in
   * the current micro-batch into the previously accumulated total for a key.
   *
   * @param currentValue counts for the key in the current micro-batch
   * @param historyValue previously accumulated total, if any
   * @return the new accumulated total (always defined, so the key's state is kept)
   */
  def updateFunc(currentValue: Seq[Int], historyValue: Option[Int]): Option[Int] = {
    val previousTotal = historyValue.getOrElse(0)
    Some(currentValue.foldLeft(previousTotal)(_ + _))
  }

  val CHDIR = "./datasets/checkpoint/ck17"


  def main(args: Array[String]): Unit = {
    // 1 - Prepare the environment.
    // Checkpoint-based recovery of state and offsets:
    // - On first start, the factory below builds a brand-new StreamingContext.
    // - After a failure, state and metadata stored under CHDIR are used to
    //   restore the previous StreamingContext instead.
    // Function to create and setup a new StreamingContext.

    def functionTest(): StreamingContext = {
      // 1 - Prepare the StreamingContext; batches are processed every 5 seconds.
      val conf: SparkConf = new SparkConf().setAppName("_051KafkaSparkStreaming010OffsetToMySQLError").setMaster("local[*]")
      val sc = new SparkContext(conf)
      sc.setLogLevel("WARN")
      // Micro-batch interval of 5 seconds.
      val ssc = new StreamingContext(sc, Seconds(5))
      ssc.checkpoint(CHDIR)
      // 4 - Core part: consume the Kafka topic and process its data.
      // NOTE(review): the whole DStream graph must be built inside this factory —
      // otherwise checkpoint recovery via getActiveOrCreate cannot rebuild it.
      ProcessData(ssc)
      // Return the newly created context.
      ssc
    }

    val ssc: StreamingContext = StreamingContext.getActiveOrCreate(CHDIR, functionTest _)

    // 5 - Start the streaming computation.
    ssc.start()
    // 6 - Block until the computation is terminated (normally runs forever).
    ssc.awaitTermination()
    // 7 - Stop SparkContext too (first arg) and do so gracefully (second arg).
    // Effectively unreachable in practice since awaitTermination blocks until shutdown.
    ssc.stop(true, true)
  }

  /**
   * Builds the full streaming graph on the given context: reads the Kafka topic
   * "kafkatopic" via the 0-10 direct stream (resuming from offsets stored in
   * MySQL when available), logs records and their offset ranges, persists the
   * offsets, and runs a stateful WordCount over the record values.
   */
  def ProcessData(ssc: StreamingContext): Unit = {
    // 2 - Consumer configuration for KafkaUtils.createDirectStream.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "node01:9092,node02:9092,node03:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "each_stream",
      // When no committed offset exists, start from the latest offset.
      "auto.offset.reset" -> "latest",
      // Manual commit: if true Kafka would auto-commit; here offsets are
      // committed/saved explicitly inside foreachRDD below.
      "enable.auto.commit" -> (false: lang.Boolean)
    )

    // Load previously saved offsets for this group/topic from MySQL
    // (empty on a first run or after the table is cleared).
    val OffSetMap: mutable.Map[TopicPartition, Long] = OffsetUtil.getOffsetMap("each_stream", "kafkatopic")
    // Either resume from the MySQL-stored offsets or start from the latest ones.
    val receiveData: InputDStream[ConsumerRecord[String, String]] = if (OffSetMap.size > 0) {
      println("mysql中就是有offset的数据，直接开始消费")
      // MySQL holds offsets for this group: resume consumption from them.
      KafkaUtils.createDirectStream[String, String](ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](
          Array("kafkatopic"),
          // offsets = collection.Map[TopicPartition(topic, partition), Long]
          kafkaParams, OffSetMap)
      )
    } else {
      println("mysql中没有offset的数据，直接从最新的偏移量开始消费")
      // No offsets in MySQL: start from the latest offsets ("auto.offset.reset").
      KafkaUtils.createDirectStream[String, String](ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](
          Array("kafkatopic"),
          kafkaParams)
      )
    }

    // Inspect each micro-batch: log records and their offset ranges, then
    // persist the offsets.
    // Apply a function to each RDD in this DStream.
    receiveData.foreachRDD(rdd => {
      // Only log record contents when the batch actually contains data.
      if (rdd.count() > 0) {
        rdd.foreach(f => {
          // Full ConsumerRecord (topic, partition, offset, timestamp, key, value).
          println("topic value is:", f)
          //(topic value is:,ConsumerRecord(topic = kafkatopic, partition = 1, offset = 51, CreateTime = 1596481019856, serialized key size = -1, serialized value size = 13, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = hellosa spark))
          // Just the record's value.
          println("topic true value is:", f.value())
        })
      } //end if
      // 1 - Obtain this batch's per-partition offset ranges: RDDs produced by
      // createDirectStream implement HasOffsetRanges.
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      // Log each partition's start/end offsets.
      for (o <- offsetRanges) {
        println(s"topic：${o.topic} partition:${o.partition} startOffSet:${o.fromOffset} utilOffSet:${o.untilOffset}")
      }
      println("=========================================================")
      // Manual offset commit.
      // Option 1: commit asynchronously back to Kafka's own offset storage.
      receiveData.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      // Option 2: persist offsets to MySQL (this is what getOffsetMap reads on restart).
      // NOTE(review): offsets are saved here regardless of whether the word-count
      // output operation below succeeds for this batch, so a failure there can
      // skip data on restart — confirm this at-most-once behavior is intended
      // for this (deliberately "Error") teaching example.
      OffsetUtil.saveOffsetRanges("each_stream", offsetRanges)
    })

    // 3 - WordCount over the Kafka record values.
    val dataValue: DStream[String] = receiveData.map(x => x.value())
    val result: DStream[(String, Int)] = dataValue
      .flatMap(_.split("\\s+"))
      .map(x => (x, 1))
      .updateStateByKey(updateFunc)
    // 4 - Print the running totals.
    result.print()
  }

  /*
  Utility for manually maintaining Kafka offsets in MySQL.
  Requires the following table:
    CREATE TABLE `t_offset` (
      `topic` varchar(255) NOT NULL,
      `partition` int(11) NOT NULL,
      `groupid` varchar(255) NOT NULL,
      `offset` bigint(20) DEFAULT NULL,
      PRIMARY KEY (`topic`,`partition`,`groupid`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
   */
  object OffsetUtil {
    // JDBC connection settings shared by both operations.
    private val JdbcUrl = "jdbc:mysql://node01:3306/bigdata?characterEncoding=UTF-8"
    private val JdbcUser = "root"
    private val JdbcPassword = "123456"

    /**
     * Reads the stored offsets for the given consumer group and topic from MySQL.
     * All JDBC resources are released even if a query fails (fixes the leak in
     * the original version, which never closed them on exception).
     *
     * @param groupid Kafka consumer group id
     * @param topic   Kafka topic name
     * @return mutable map of TopicPartition -> offset; empty when no rows exist
     */
    def getOffsetMap(groupid: String, topic: String): mutable.Map[TopicPartition, Long] = {
      val connection = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
      try {
        val pstmt = connection.prepareStatement("select * from t_offset where groupid=? and topic=?")
        try {
          pstmt.setString(1, groupid)
          pstmt.setString(2, topic)
          val rs: ResultSet = pstmt.executeQuery()
          try {
            val offsetMap = mutable.Map[TopicPartition, Long]()
            while (rs.next()) {
              // Key/value shapes match what KafkaUtils.createDirectStream expects.
              offsetMap += new TopicPartition(rs.getString("topic"), rs.getInt("partition")) -> rs.getLong("offset")
            }
            offsetMap
          } finally rs.close()
        } finally pstmt.close()
      } finally connection.close()
    }

    /**
     * Persists the end offset of each range to MySQL.
     * REPLACE INTO updates an existing (topic, partition, groupid) row or inserts
     * a new one. Resources are released in a finally block even on failure.
     *
     * @param groupid     Kafka consumer group id
     * @param offsetRange per-partition offset ranges of the processed batch
     */
    def saveOffsetRanges(groupid: String, offsetRange: Array[OffsetRange]): Unit = {
      val connection = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
      try {
        val pstmt = connection.prepareStatement("replace into t_offset (`topic`, `partition`, `groupid`, `offset`) values(?,?,?,?)")
        try {
          for (o <- offsetRange) {
            pstmt.setString(1, o.topic)
            pstmt.setInt(2, o.partition)
            pstmt.setString(3, groupid)
            // untilOffset is the next offset to consume after this batch.
            pstmt.setLong(4, o.untilOffset)
            pstmt.executeUpdate()
          }
        } finally pstmt.close()
      } finally connection.close()
    }
  }

}
