package com.atguigu.sparkstreaming.exactlyonce

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.atguigu.sparkstreaming.utils.JDBCUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

/**
 * Created by Smexy on 2022/8/22
 *
 *
 *  原理：   at least once + 事务输出 =   exactly once
 *          offsets 和 data 都在关系型数据库存储。
 *
 *      ①程序启动前，要从关系型数据库读取 上次提交的 offsets
 *      ②基于上次提交的offsets获取一个，从提交位置向后消费的 DStream
 *      ③获取当前批次数据的offsets
 *      ④进行业务的计算
 *      ⑤将分布式计算的结果收集到Driver端，和offsets在一个事务中写出
 *
 * --------------------------------
 *  以wordcount为例(有状态的计算，累积)。
 *
 *  -------------------------
 *  设计mysql中，data和offsets存储的表结构。
 *      wordcount: 粒度是  一个单词是一行
 *      offsets  : 粒度是 一个组消费一个主题的一个分区是一行
 *
 *
 *
 *
 *
 */
object TransactionExactlyOnceDemo {

  // Consumer group and topic this demo is hard-wired to.
  val groupId = "sz220409test"
  val topic = "topicD"

  /**
   * Reads the offsets committed by the previous run from MySQL for the given
   * consumer group and topic.
   *
   * Table granularity: one row per (group, topic, partition).
   *
   * @param groupId Kafka consumer group id
   * @param topic   Kafka topic name
   * @return a mutable map of TopicPartition -> untilOffset (empty on first run)
   * @throws RuntimeException if the offsets cannot be read. We deliberately do
   *                          NOT swallow the error: returning an empty map on
   *                          failure would silently fall back to
   *                          `auto.offset.reset`, skipping data and breaking
   *                          the exactly-once guarantee.
   */
  def readHistoryOffsetsFromMysql(groupId:String,topic:String):mutable.Map[TopicPartition, Long]={

    // Result accumulator.
    val offsets = new mutable.HashMap[TopicPartition, Long]()

    // Plain JDBC read of the last committed offsets.
    val sql =
      """
        |
        |select
        |   partitionId,untilOffset
        |from offsets
        |where groupId = ? and topic = ?
        |
        |""".stripMargin

    var connection: Connection = null
    var ps: PreparedStatement = null
    var resultSet: ResultSet = null

    try {
      connection = JDBCUtil.getConnection()
      ps = connection.prepareStatement(sql)
      ps.setString(1, groupId)
      ps.setString(2, topic)
      resultSet = ps.executeQuery()

      while (resultSet.next()) {
        offsets.put(
          new TopicPartition(topic, resultSet.getInt("partitionId")),
          resultSet.getLong("untilOffset"))
      }
    } catch {
      case e: Exception =>
        // FIX: previously this was printStackTrace() only, which silently
        // returned an incomplete offsets map and started consumption from
        // "latest". Fail fast instead so the operator notices.
        throw new RuntimeException(
          s"Failed to read committed offsets for group=$groupId topic=$topic", e)
    } finally {
      // FIX: the ResultSet used to be leaked; close it before the statement.
      if (resultSet != null) {
        resultSet.close()
      }

      if (ps != null) {
        ps.close()
      }

      if (connection != null) {
        connection.close()
      }
    }

    offsets
  }

  /**
   * Writes the (collected-to-driver) batch result and this batch's offsets to
   * MySQL inside ONE transaction — the "transactional sink" half of
   * "at-least-once + transactional output = exactly-once".
   *
   * Stateful word count: each write is merged with the word's existing count
   * via `ON DUPLICATE KEY UPDATE`.
   *
   * @param data    word counts of the current batch, already collected on the driver
   * @param offsets one OffsetRange per consumed partition for the current batch
   * @throws RuntimeException if the transaction fails. The error is rethrown
   *                          (after rollback) on purpose: if it were swallowed,
   *                          later batches would commit newer offsets past the
   *                          lost batch, dropping its data permanently.
   */
  def writeDataAndOffsetsInATransction(data: Array[(String, Int)], offsets: Array[OffsetRange]): Unit = {

    // COUNT: the value already stored in MySQL for this word.
    // VALUES(COUNT): the value being inserted for the current batch.
    val sql1=
      """
        |
        |INSERT INTO `wordcount` VALUES(?,?)
        |ON DUPLICATE KEY UPDATE COUNT = COUNT + VALUES(COUNT)
        |
        |
        |""".stripMargin

    // REPLACE keeps one row per (group, topic, partition).
    val sql2=
      """
        |
        |REPLACE INTO `offsets` VALUES(?,?,?,?)
        |
        |""".stripMargin


    var connection:Connection = null
    var ps1: PreparedStatement = null
    var ps2: PreparedStatement = null

    try {
      connection = JDBCUtil.getConnection()

      // Transaction: disable auto-commit, commit/rollback manually.
      connection.setAutoCommit(false)

      ps1 = connection.prepareStatement(sql1)
      ps2 = connection.prepareStatement(sql2)

      /*
          Fill in the placeholders.

          Each loop iteration produces one insert statement, e.g.
              INSERT INTO `wordcount` VALUES('a',5) ON DUPLICATE KEY UPDATE COUNT = COUNT + 5
       */
      for ((word, count) <- data) {
        ps1.setString(1, word)
        ps1.setLong(2, count)

        // Don't send each statement individually; batch them up.
        ps1.addBatch()
      }

      // One OffsetRange per partition consumed in this batch.
      for (offsetRange <- offsets) {

        ps2.setString(1, groupId)
        ps2.setString(2, topic)
        ps2.setInt(3, offsetRange.partition)
        // untilOffset is the position after the last consumed record —
        // this is what gets committed.
        ps2.setLong(4, offsetRange.untilOffset)

        ps2.addBatch()

      }

      // Flush both batches to MySQL in one round-trip each.
      val dataSuccess: Array[Int] = ps1.executeBatch()
      val offsetsSuccess: Array[Int] = ps2.executeBatch()

      // Transaction: commit data and offsets atomically.
      connection.commit()

      println("数据写成功了:" + dataSuccess.length)
      println("offsets写成功了:" + offsetsSuccess.length)


    } catch {
      case e: Exception =>
        // FIX: guard against NPE — if getConnection() itself threw,
        // `connection` is still null and rollback() would mask the real error.
        if (connection != null) {
          connection.rollback()
        }
        // FIX: rethrow instead of swallowing, so the failed batch is not
        // silently dropped while later batches advance the offsets.
        throw new RuntimeException("Failed to write data and offsets transactionally", e)
    } finally {
      if (ps1 != null){
        ps1.close()
      }

      if (ps2 != null){
        ps2.close()
      }

      if (connection != null){
        connection.close()
      }

    }

  }


  def main(args: Array[String]): Unit = {


    // (1) Before starting, read the offsets committed by the previous run.
    val offsets: mutable.Map[TopicPartition, Long] = readHistoryOffsetsFromMysql(groupId, topic)

    // Default: slide = window = batchDuration.
    val streamingContext = new StreamingContext("local[*]", "wordcount", Seconds(5))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop102:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "latest",
      // Disable Kafka's automatic offset commit; offsets live in MySQL.
      "enable.auto.commit" -> "false"
    )

    val topics = Array(topic)

    val ds: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      streamingContext,
      PreferConsistent,
      // (2) Resume consumption from the offsets committed last time.
      Subscribe[String, String](topics, kafkaParams, offsets)
    )

    ds.foreachRDD(rdd => {

      // Only run the pipeline when the batch actually has data.
      if (!rdd.isEmpty()){

        // (3) Capture this batch's offset ranges before any transformation.
        val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        // (4) Business logic: word count for the current batch.
        val ds1: RDD[(String, Int)] = rdd.flatMap(record => record.value().split(" "))
          .map(word => (word, 1))
          .reduceByKey(_ + _)

        // (5) Collect the result to the driver and write it together with the
        //     offsets in one MySQL transaction.
        val data: Array[(String, Int)] = ds1.collect()

        writeDataAndOffsetsInATransction(data, ranges)

        // Also commit offsets to Kafka. Not required for correctness — the
        // next run reads offsets from MySQL — but useful for monitoring tools.
        ds.asInstanceOf[CanCommitOffsets].commitAsync(ranges)


      }

    })


    streamingContext.start()

    streamingContext.awaitTermination()

  }

}
