package com.atguigu.sparkstreaming.demos

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.atguigu.sparkstreaming.utils.JDBCUtil
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

/**
 * Created by Smexy on 2022/5/23
 *
 *
 *  当前的计算方式：   有状态的计算。
 *                        将当前批次消费到的数据进行聚合(单词统计)，再和之前统计的结果，再次聚合，得到累积聚合的结果。
 *
 *                          之前批次:  a,4
 *                          当前批次:  a,2
 *
 *                          最终输出:    a,4+2
 *
 *                          第一次:  a,6
 *                          第二次:  a,8
 *                          第三次:  a,10
 *
 *                  不支持幂等输出。采取事务输出。以wordcount为例。
 *
 *  --------------------
 *    设计数据和offsets在mysql中存储的表
 *
 * CREATE TABLE `offsets` (
 * `groupId` varchar(100) NOT NULL,
 * `topic` varchar(100) NOT NULL,
 * `partitionId` int(11) NOT NULL,
 * `untiloffset` bigint(20) DEFAULT NULL,
 * PRIMARY KEY (`groupId`,`topic`,`partitionId`)
 * ) ENGINE=InnoDB DEFAULT CHARSET=utf8
 *
 * CREATE TABLE `wordcount` (
 * `word` varchar(100) NOT NULL,
 * `count` bigint(20) DEFAULT NULL,
 * PRIMARY KEY (`word`)
 * ) ENGINE=InnoDB DEFAULT CHARSET=utf8
 *
 * ----------------------
 *  ①读取外部存储中上次提交的offsets信息
 *        Map[TopicPartition,Long]
 *  ②使用KafkaUtils.createDirectStream，从offsetMap位置获取一个消费的流
 *
 *  ③各种运算
 *        a) 获取偏移量
 *        b) 计算结果，需要把结果收集到Driver端的本地
 *                事务输出只适合聚合类的运算
 *        c) 在一个事务中写出  计算结果和偏移量
 *
 * ----------------------
 *    Mybatis(JDBC)
 *
 *    apache-commons DBUtils Java版
 *      改写为scala版
 *
 *     纯scala：  scalalikeJDBC
 *
 *
 *
 */
object ExactlyOnceDemo2 {

  val groupId: String = "atguigu1227"
  val topic: String = "topicA"

  /**
   * Reads the offsets committed by a previous run of this group for the given topic.
   *
   * Queries the `offsets` table (groupId, topic, partitionId, untiloffset) and returns
   * one entry per partition. An empty map means "no history" and the stream will fall
   * back to `auto.offset.reset`.
   *
   * NOTE(fix): the original version caught every exception, printed it, and returned an
   * empty map — a transient DB failure would silently restart consumption from
   * `auto.offset.reset` and re-process already-committed data, breaking exactly-once.
   * Exceptions now propagate so startup fails visibly instead.
   *
   * @param groupId consumer group whose committed offsets are looked up
   * @param topic   topic whose partition offsets are looked up
   * @return map of TopicPartition -> last committed untilOffset (exclusive upper bound)
   */
  def readHistoryOffsets(groupId: String, topic: String): Map[TopicPartition, Long] = {

    val offsetsMap = new mutable.HashMap[TopicPartition, Long]()

    val sql =
      """
        |select
        |   *
        |from offsets
        |where groupId=? and topic=?
        |""".stripMargin

    var conn: Connection = null
    var ps: PreparedStatement = null

    try {
      conn = JDBCUtil.getConnection()
      ps = conn.prepareStatement(sql)
      ps.setString(1, groupId)
      ps.setString(2, topic)

      val resultSet: ResultSet = ps.executeQuery()
      while (resultSet.next()) {
        // Offset this group committed last time for each partition of the topic.
        offsetsMap.put(
          new TopicPartition(topic, resultSet.getInt("partitionId")),
          resultSet.getLong("untiloffset")
        )
      }
    } finally {
      // Close the statement first, but never let its failure leak the connection.
      if (ps != null) {
        try ps.close()
        catch { case e: Exception => e.printStackTrace() }
      }
      if (conn != null) {
        conn.close()
      }
    }

    offsetsMap.toMap
  }

  /**
   * Writes this batch's aggregated word counts AND the batch's Kafka offsets to MySQL
   * in a single transaction, so either both are persisted or neither is (exactly-once
   * output for an idempotence-unfriendly, stateful aggregation).
   *
   * NOTE(fix): the original catch block (a) called `conn.rollback()` without a null
   * guard — an NPE if `getConnection()` itself failed, masking the real error — and
   * (b) swallowed the exception, silently dropping the batch's results and offsets
   * while the stream kept running. The failure is now rethrown after rollback.
   *
   * @param result       word counts of the current micro-batch, collected on the driver
   * @param offsetRanges Kafka offset ranges consumed by the current micro-batch
   */
  def writeDataAndOffsetsInCommonBatch(result: Array[(String, Int)], offsetRanges: Array[OffsetRange]): Unit = {

    // Stateful accumulation: merge this batch's counts into the running totals
    // via an upsert (insert new words, add to the count of existing ones).
    val sql1 =
      """
        |INSERT INTO wordcount VALUES(?,?)
        |ON DUPLICATE KEY UPDATE count=count + values(count)
        |""".stripMargin

    // REPLACE keeps exactly one offset row per (groupId, topic, partitionId).
    val sql2 =
      """
        |replace into offsets values(?,?,?,?)
        |""".stripMargin

    var conn: Connection = null
    var ps1: PreparedStatement = null
    var ps2: PreparedStatement = null

    try {
      conn = JDBCUtil.getConnection()

      // Disable auto-commit: both statements must commit atomically.
      conn.setAutoCommit(false)

      ps1 = conn.prepareStatement(sql1)
      ps2 = conn.prepareStatement(sql2)

      // Stage one upsert per word of the current micro-batch.
      for ((word, count) <- result) {
        ps1.setString(1, word)
        ps1.setLong(2, count.toLong)
        ps1.addBatch()
      }

      // Stage one offset row per consumed partition.
      for (offsetRange <- offsetRanges) {
        ps2.setString(1, groupId)
        ps2.setString(2, topic)
        ps2.setInt(3, offsetRange.partition)
        ps2.setLong(4, offsetRange.untilOffset)
        ps2.addBatch()
      }

      // Execute both batches, then commit them as one transaction.
      val dataResponse: Array[Int] = ps1.executeBatch()
      val offsetResponse: Array[Int] = ps2.executeBatch()

      conn.commit()

      println("数据写成功:" + dataResponse.size)
      println("偏移量写成功:" + offsetResponse.size)

    } catch {
      case e: Exception =>
        // Guard: getConnection() itself may have failed, leaving conn null.
        if (conn != null) {
          conn.rollback()
        }
        // Rethrow so the failed batch is not silently dropped.
        throw e
    } finally {
      // Close statements defensively so one failure cannot leak the connection.
      if (ps1 != null) {
        try ps1.close()
        catch { case e: Exception => e.printStackTrace() }
      }
      if (ps2 != null) {
        try ps2.close()
        catch { case e: Exception => e.printStackTrace() }
      }
      if (conn != null) {
        conn.close()
      }
    }
  }

  /**
   * Entry point: exactly-once word count.
   *
   * ① read previously committed offsets from MySQL,
   * ② open a direct Kafka stream starting at those offsets,
   * ③ per batch: aggregate on the cluster, collect to the driver,
   * ④ write counts + offsets in one MySQL transaction.
   */
  def main(args: Array[String]): Unit = {

    val streamingContext = new StreamingContext("local[*]", "SparkStreamingKafkaDemo", Seconds(5))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop102:9092,hadoop103:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "earliest",
      // ① Kafka must NOT auto-commit: offsets are committed transactionally to MySQL.
      "enable.auto.commit" -> "false"
    )

    val topics1 = Array(topic)

    // ② Resume from the offsets committed by the previous run (empty map on first run).
    val offsetMap: Map[TopicPartition, Long] = readHistoryOffsets(groupId, topic)

    val stream = KafkaUtils.createDirectStream[String, String](
      streamingContext,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics1, kafkaParams, offsetMap)
    )

    stream.foreachRDD(rdd => {

      // Skip empty micro-batches: nothing to aggregate, nothing to commit.
      if (!rdd.isEmpty()) {

        // ③a Offsets must be captured on the ORIGINAL KafkaRDD, before any transformation.
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        // ③b Aggregate this batch and collect the (small) result to the driver —
        //     transactional output only works for aggregate-style results.
        val result: Array[(String, Int)] =
          rdd.map(_.value()).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).collect()

        // ④ Commit counts and offsets atomically.
        writeDataAndOffsetsInCommonBatch(result, offsetRanges)
      }
    })

    streamingContext.start()

    // Block the main thread so the app runs until externally stopped.
    streamingContext.awaitTermination()
  }

}
