package com.atguigu.sparkstreaming.examples

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.atguigu.sparkstreaming.utils.JDBCUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

/**
 * Created by Smexy on 2022/7/15
 *
 *
 *
// begin from the offsets committed to the database
val fromOffsets = selectOffsetsFromYourDatabase.map { resultSet =>
  new TopicPartition(resultSet.string("topic"), resultSet.int("partition")) -> resultSet.long("offset")
}.toMap

val stream = KafkaUtils.createDirectStream[String, String](
  streamingContext,
  PreferConsistent,
  Assign[String, String](fromOffsets.keys.toList, kafkaParams, fromOffsets)
)

stream.foreachRDD { rdd =>
  val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

  val results = yourCalculation(rdd)

  // begin your transaction

  // update results
  // update offsets where the end of existing offsets matches the beginning of this batch of offsets
  // assert that offsets were updated correctly

  // end your transaction
}

      ①查询之前写入数据库的偏移量 Offsets
      ②从 offsets位置获取一个流
       ③  自己的运算，得到结果
       ④  在一个事务中把结果和偏移量 写出到数据库

    以wordcount(累加)为例
        设计mysql中怎么存储数据，offsets
          数据:
                粒度：  一个word是一行
                主键:   word

          offset:   groupId,topic,partitionid,offset
                粒度：   一个组消费一个主题的一个分区是一行
                主键:   (groupId,topic,partitionid)
 *
 */
object ExactlyOnceTransactionDemo {

  val groupId = "2203092"
  val topic = "topicA"

  /**
   * Queries the offsets previously committed to MySQL for the given consumer
   * group and topic (one row per (groupId, topic, partitionId)).
   *
   * @param groupId the Kafka consumer group whose committed offsets to load
   * @param topic   the topic whose partition offsets to load
   * @return an immutable map of TopicPartition -> next offset to consume;
   *         empty on first run (no rows yet)
   * @throws RuntimeException if the query fails (original cause attached)
   */
  def selectOffsetsFromMysql(groupId: String, topic: String): Map[TopicPartition, Long] = {

    val offsets = new mutable.HashMap[TopicPartition, Long]()

    val sql =
      """
        |
        |select
        |   partitionId,offset
        |from offsets
        |where groupId=? and topic=?
        |
        |
        |""".stripMargin

    var connection: Connection = null
    var ps: PreparedStatement = null
    var resultSet: ResultSet = null

    try {
      connection = JDBCUtil.getConnection()

      ps = connection.prepareStatement(sql)
      ps.setString(1, groupId)
      ps.setString(2, topic)

      resultSet = ps.executeQuery()

      while (resultSet.next()) {
        offsets.put(
          new TopicPartition(topic, resultSet.getInt("partitionId")),
          resultSet.getLong("offset")
        )
      }

    } catch {
      case e: Exception =>
        e.printStackTrace()
        // Preserve the original exception as the cause so the real failure
        // is not lost (the original code threw without it).
        throw new RuntimeException("查询偏移量失败!", e)
    } finally {
      // Close in reverse order of acquisition; ResultSet was previously
      // only closed implicitly via the statement.
      if (resultSet != null) {
        resultSet.close()
      }
      if (ps != null) {
        ps.close()
      }
      if (connection != null) {
        connection.close()
      }
    }

    // Convert the mutable accumulator to an immutable map for the caller.
    offsets.toMap
  }


  /**
   * Writes the batch result (wordcount deltas) and the batch's Kafka offsets
   * to MySQL inside a single JDBC transaction, giving exactly-once semantics:
   * either both the data and the offsets commit, or neither does.
   *
   * NOTE: the method name contains a typo ("wirte"); it is kept as-is because
   * it is part of this object's public interface.
   *
   * @param result word counts computed for this batch, e.g. [(a,1),(b,2)]
   * @param ranges offset ranges of this batch; untilOffset is the next
   *               offset to consume, so that is what gets persisted
   * @throws RuntimeException if the write fails (transaction is rolled back,
   *                          original cause attached)
   */
  def wirteResultAndOffsetsInCommonTransaction(result: Array[(String, Int)], ranges: Array[OffsetRange]): Unit = {

    /*
        Upsert words: add the current batch's count for each word onto the
        count already stored in the database (accumulating wordcount).
     */
    val sql1 =
      """
        |INSERT INTO `wordcount` VALUES(?,?)
        |ON DUPLICATE KEY UPDATE COUNT=COUNT + VALUES(COUNT)
        |
        |
        |""".stripMargin

    // Upsert offsets: overwrite the stored offset for each
    // (groupId, topic, partitionId) key.
    val sql2 =
      """
        |INSERT INTO `offsets` VALUES(?,?,?,?)
        |ON DUPLICATE KEY UPDATE OFFSET=VALUES(OFFSET)
        |
        |""".stripMargin

    var connection: Connection = null
    var ps1: PreparedStatement = null
    var ps2: PreparedStatement = null

    try {
      connection = JDBCUtil.getConnection()

      // Begin the transaction: disable auto-commit so both statements
      // commit together (or roll back together).
      connection.setAutoCommit(false)

      ps1 = connection.prepareStatement(sql1)
      ps2 = connection.prepareStatement(sql2)

      /*
            result: [ (a,1),(b,2),(c,3) ]

            One batched upsert per word, e.g.:
                INSERT INTO `wordcount` VALUES('a',1) ON DUPLICATE KEY UPDATE COUNT=COUNT + VALUES(COUNT)
                INSERT INTO `wordcount` VALUES('b',2) ON DUPLICATE KEY UPDATE COUNT=COUNT + VALUES(COUNT)
                INSERT INTO `wordcount` VALUES('c',3) ON DUPLICATE KEY UPDATE COUNT=COUNT + VALUES(COUNT)
       */
      for ((word, count) <- result) {
        ps1.setString(1, word)
        // count is an Int; setInt matches (the original widened it via setLong)
        ps1.setInt(2, count)

        // accumulate into the batch
        ps1.addBatch()
      }

      for (offsetRange <- ranges) {
        ps2.setString(1, groupId)
        ps2.setString(2, topic)
        ps2.setInt(3, offsetRange.partition)
        // untilOffset is exclusive = the next offset to read on restart
        ps2.setLong(4, offsetRange.untilOffset)

        ps2.addBatch()
      }

      val res1: Array[Int] = ps1.executeBatch()
      val res2: Array[Int] = ps2.executeBatch()

      // Commit the transaction: data and offsets become visible atomically.
      connection.commit()

      println("数据写入:" + res1.length)
      println("偏移量写入:" + res2.length)

    } catch {
      case e: Exception =>
        // Bug fix: if getConnection() itself failed, connection is still null
        // and the original unconditional rollback() threw an NPE that masked
        // the real error.
        if (connection != null) {
          connection.rollback()
        }

        e.printStackTrace()

        // Attach the cause so the real failure is diagnosable.
        throw new RuntimeException("写入失败!", e)
    } finally {

      if (ps1 != null) {
        ps1.close()
      }
      if (ps2 != null) {
        ps2.close()
      }

      if (connection != null) {
        // NOTE(review): restore auto-commit before close. If JDBCUtil hands
        // out pooled connections, returning one with autoCommit=false would
        // leak transaction state to the next borrower — confirm pool behavior.
        try {
          connection.setAutoCommit(true)
        } catch {
          case _: Exception => // best-effort; the connection is closing anyway
        }
        connection.close()
      }

    }

  }

  /**
   * Entry point. Exactly-once pipeline:
   *   1. load committed offsets from MySQL;
   *   2. start a direct Kafka stream from those offsets;
   *   3. per batch, capture the batch's offset ranges on the driver;
   *   4. compute the wordcount for the batch;
   *   5. write results + offsets in one MySQL transaction.
   */
  def main(args: Array[String]): Unit = {

    // Step 1: load previously committed offsets.
    val offsetsMap: Map[TopicPartition, Long] = selectOffsetsFromMysql(groupId, topic)

    val streamingContext = new StreamingContext("local[*]", "TransformDemo", Seconds(10))

    // All consumer parameter keys are also available as constants in ConsumerConfig.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop102:9092,hadoop103:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "latest",
      // Disable Kafka's own offset commit; offsets are managed in MySQL.
      "enable.auto.commit" -> "false"
    )

    val topics = Array(topic)

    // Step 2: consume starting from the offsets loaded in step 1.
    val ds: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      streamingContext,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams, offsetsMap)
    )

    ds.foreachRDD(rdd => {

      if (!rdd.isEmpty()) {
        // Step 3: capture this batch's offset ranges (driver side; must be
        // read before any shuffle transformation on the RDD).
        val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        // Step 4: compute the batch's wordcount and collect to the driver.
        val result: Array[(String, Int)] = rdd.flatMap(record => record.value().split(" "))
          .map(word => (word, 1))
          .reduceByKey(_ + _).collect()

        // Step 5: write result and offsets in one transaction.
        wirteResultAndOffsetsInCommonTransaction(result, ranges)
      }

    })

    // Start the streaming application.
    streamingContext.start()

    // Block the main thread so the app keeps running.
    streamingContext.awaitTermination()

  }

}
