package com.atguigu.sparkstreaming.demos

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.atguigu.sparkstreaming.utils.JDBCUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

/**
 * Created by Smexy on 2022/6/24
 *
 *  以全局累加单词为例。
 *      把数据和offsets在一个事务中写入到数据库(RDMS)。
 *
 *      结果：   (word,count)
 *
 * CREATE TABLE `wordcount` (
 * `word` varchar(100) NOT NULL,
 * `count` bigint(20) DEFAULT NULL,
 * PRIMARY KEY (`word`)
 * ) ENGINE=InnoDB DEFAULT CHARSET=utf8
 *
 *      offset:  ((groupId,topic,partitionId),offset)
 *
 *
 * CREATE TABLE `offsets` (
 * `groupId` varchar(100) NOT NULL,
 * `topic` varchar(100) NOT NULL,
 * `partitionId` int(11) NOT NULL,
 * `offset` bigint(20) DEFAULT NULL,
 * PRIMARY KEY (`groupId`,`topic`,`partitionId`)
 * ) ENGINE=InnoDB DEFAULT CHARSET=utf8
 *
 *    at least once + 事务输出 = exactly once
 *
 *    -------------
 *      只适合聚合类运算！
 *
 *      ①先从数据库中查询上一次提交的offsets
 *      ②基于上次提交的offsets位置，获取一个DS
 *      ③获取当前批次的偏移量
 *      ④转换运算，将结果拉取到Driver端
 *      ⑤开启事务，在事务中写出计算的结果和偏移量
 */
object ExactlyOnceDemo2 {

  val groupId = "test1"
  val topic = "topicA"

  /**
   * Queries the offsets committed by the previous batch from the `offsets` table.
   *
   * @param groupId consumer group id used as part of the primary key
   * @param topic   topic name used as part of the primary key
   * @return map of (topic, partitionId) -> committed offset; empty on first run
   *         (Kafka then falls back to `auto.offset.reset`)
   * @throws RuntimeException if the query fails, with the JDBC exception chained as cause
   */
  def selectHistoryOffsetsFromMysql(groupId:String,topic:String ):Map[TopicPartition, Long]={

    val offests = new mutable.HashMap[TopicPartition, Long]()

    val sql=
      """
        |
        |select
        |   partitionId,offset
        |from offsets
        |where groupId=? and topic=?
        |
        |""".stripMargin

    var connection: Connection = null
    var ps: PreparedStatement =  null
    var resultSet: ResultSet = null
    try {
      connection = JDBCUtil.getConnection()
      ps= connection.prepareStatement(sql)
      ps.setString(1, groupId)
      ps.setString(2, topic)

      resultSet = ps.executeQuery()

      while (resultSet.next()) {

        offests.put(new TopicPartition(topic, resultSet.getInt("partitionId")), resultSet.getLong("offset"))

      }
    } catch {
      case e:Exception => {
        e.printStackTrace()
        // Chain the cause so callers still see the original JDBC stack trace.
        throw new RuntimeException("查询偏移量失败!", e)
      }
    } finally {

      // Close in reverse order of acquisition; each is guarded independently.
      if (resultSet != null){
        resultSet.close()
      }

      if (ps != null){
        ps.close()
      }

      if (connection != null){
        connection.close()
      }

    }

    // Convert the mutable accumulator to an immutable Map for the caller.
    offests.toMap

  }


  /**
   * Writes the aggregated word counts AND the batch offsets in a single JDBC
   * transaction, so either both are persisted or neither is (exactly-once output).
   *
   * @param result word counts of the current batch, collected to the driver
   * @param ranges offset ranges of the current batch; `untilOffset` (the position
   *               to resume from) is what gets stored
   * @throws RuntimeException if the transaction fails, with the cause chained;
   *                          the transaction is rolled back first
   */
  def writeResultAndOffsetsInCommonTranscation(result: Array[(String, Int)], ranges: Array[OffsetRange]): Unit = {

    // Upsert: unqualified `count` refers to the existing row's value, so counts accumulate.
    val sql1=
      """
        |
        |INSERT INTO wordcount VALUES(?,?)
        |ON DUPLICATE KEY UPDATE count = values(count) + count
        |
        |
        |""".stripMargin

    // Upsert: a new batch simply overwrites the previously stored offset.
    val sql2=
      """
        |
        |INSERT INTO offsets VALUES(?,?,?,?)
        |ON DUPLICATE KEY UPDATE offset = values(offset)
        |
        |
        |""".stripMargin

    var connection: Connection = null
    var ps1: PreparedStatement =  null
    var ps2: PreparedStatement =  null
    try {
      connection = JDBCUtil.getConnection()

      // Take manual control of the transaction boundary.
      connection.setAutoCommit(false)

      ps1= connection.prepareStatement(sql1)
      ps2= connection.prepareStatement(sql2)

      for ((word, count) <- result) {

        ps1.setString(1,word)
        ps1.setLong(2,count)

        // Accumulate into the batch.
        ps1.addBatch()

      }

      for (offsetRange <- ranges) {

        ps2.setString(1,groupId)
        ps2.setString(2,topic)
        ps2.setInt(3,offsetRange.partition)
        ps2.setLong(4,offsetRange.untilOffset)

        ps2.addBatch()

      }

      // Execute both batches inside the same (still-open) transaction.
      val res1: Array[Int] = ps1.executeBatch()

      val res2: Array[Int] = ps2.executeBatch()

      // Commit results and offsets atomically.
      connection.commit()

      println("当前写出数据:"+res1.size)
      println("当前写出分区偏移量:"+res2.size)


    } catch {
      case e:Exception => {
        // Guard against NPE: getConnection() itself may have thrown, leaving
        // `connection` null — rolling back unconditionally would mask the real error.
        if (connection != null){
          connection.rollback()
        }
        e.printStackTrace()
        // Accurate message (was a copy-paste of the offset-query message) and chained cause.
        throw new RuntimeException("写出结果和偏移量失败!", e)
      }
    } finally {

      if (ps1 != null){
        ps1.close()
      }

      if (ps2 != null){
        ps2.close()
      }

      if (connection != null){
        // Restore the default before close: if JDBCUtil is pool-backed, the
        // connection must not be returned with autoCommit still disabled.
        try {
          connection.setAutoCommit(true)
        } catch {
          case _: Exception => // best effort; closing anyway
        }
        connection.close()
      }

    }

  }

  /**
   * Entry point: at-least-once consumption + transactional output = exactly-once.
   * Only suitable for aggregation-style jobs whose per-batch result is small
   * enough to collect to the driver.
   */
  def main(args: Array[String]): Unit = {

    val streamingContext = new StreamingContext("local[*]", "simpledemo", Seconds(5))

    // Step 1: read the offsets committed by the last successful batch from MySQL.
    val offsets: Map[TopicPartition, Long] = selectHistoryOffsetsFromMysql(groupId, topic)

    // Kafka consumer configuration.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop102:9092,hadoop103:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "latest",
      // Disable auto-commit: offsets are committed transactionally with the results.
      "enable.auto.commit" -> "false"
    )

    val topics = Array(topic)

    // Step 2: build the DStream starting from the offsets recovered above.
    val ds: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      streamingContext,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams,offsets)
    )

    ds.foreachRDD { rdd =>
      if (!rdd.isEmpty()){
        // Step 3: capture this batch's offset ranges (must be read off the
        // KafkaRDD before any transformation that loses the partition mapping).
        val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        // Step 4: aggregate and pull the (small) result to the driver.
        val result: Array[(String, Int)] = rdd.map(record => (record.value(), 1)).reduceByKey(_ + _).collect()

        // Step 5: persist result and offsets in one transaction.
        writeResultAndOffsetsInCommonTranscation(result,ranges)
      }
    }

    streamingContext.start()

    streamingContext.awaitTermination()

  }

}
