package com.zhao.algorithm.utils

import java.sql.{Connection, PreparedStatement, ResultSet}

import kafka.common.TopicAndPartition
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.kafka010.OffsetRange

import scala.collection.mutable
import scala.util.control.NonFatal

/**
 * Description: 自动保存offset到Mysql,保证没有处理的数据,偏移量不移动<br/>
 * Copyright (c) ，2020 ， 赵 <br/>
 * This program is protected by copyright laws. <br/>
 * Date： 2020/11/23 9:55
 *
 * MySQL DDL — column names must match the SQL used in this object:
 * `topic_partition` stores the pair as a single "topic|partition" string.
 * The UNIQUE KEY on (app_group_id, topic_partition) is REQUIRED: without it,
 * `INSERT ... ON DUPLICATE KEY UPDATE` never fires and every batch inserts a
 * new row instead of updating the saved offset.
 *
 * CREATE TABLE `pre_kafka_offset` (
 *   `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
 *   `app_group_id` varchar(100) DEFAULT NULL,
 *   `topic_partition` varchar(100) DEFAULT NULL,
 *   `offset` bigint(20) DEFAULT NULL,
 *   PRIMARY KEY (`id`),
 *   UNIQUE KEY `uk_app_topic` (`app_group_id`, `topic_partition`)
 * ) ENGINE=InnoDB DEFAULT CHARSET=utf8
 *
 * @author 柒柒
 * @version : 1.0
 */

object OffsetUtils {

  /**
   * Query the previously committed offsets for this application/consumer group
   * from MySQL.
   *
   * Rows are keyed by `app_group_id` = s"$appId|$groupId", and each row stores
   * the topic/partition pair as a single "topic|partition" string column.
   *
   * @param appId   application id (first half of the `app_group_id` key)
   * @param groupId consumer group id (second half of the `app_group_id` key)
   * @return map of TopicPartition -> saved offset; empty if nothing was saved yet
   * @throws RuntimeException if the query or row parsing fails
   */
  def queryHistoryOffsetFromMySQL(appId: String, groupId: String): Map[TopicPartition, Long] = {
    val offsetMap = new mutable.HashMap[TopicPartition, Long]()
    var connection: Connection = null
    var statement: PreparedStatement = null
    var resultSet: ResultSet = null
    try {
      connection = MetadataJDBCUtil.getConn()
      statement = connection.prepareStatement(
        "SELECT `topic_partition`, `offset` FROM pre_kafka_offset WHERE app_group_id = ?")
      statement.setString(1, appId + "|" + groupId)
      resultSet = statement.executeQuery()
      // Iterate the result set; each row holds one topic/partition's offset.
      while (resultSet.next()) {
        val topicPartitionStr = resultSet.getString(1)
        val offset = resultSet.getLong(2)
        // Stored as "topic|partition" — split back into the two components.
        // A malformed row fails fast via the RuntimeException below.
        val fields = topicPartitionStr.split("\\|")
        val topicPartition = new TopicPartition(fields(0), fields(1).toInt)
        offsetMap(topicPartition) = offset
      }
    } catch {
      // NonFatal: let OOM/interrupts propagate instead of wrapping them.
      case NonFatal(e) =>
        throw new RuntimeException("查询历史偏移量出现异常...", e)
    } finally {
      // FIX: close each resource independently — previously a failing
      // resultSet.close() would leak the statement and the connection.
      if (resultSet != null) {
        try resultSet.close() catch { case NonFatal(_) => }
      }
      if (statement != null) {
        try statement.close() catch { case NonFatal(_) => }
      }
      if (connection != null) {
        MetadataJDBCUtil.releaseCon(connection)
      }
    }
    offsetMap.toMap
  }

  /**
   * Manually commit the offsets of the batch just processed to MySQL, all
   * partitions in a single transaction (all saved or none).
   *
   * Requires a UNIQUE KEY on (app_group_id, topic_partition) so that
   * `ON DUPLICATE KEY UPDATE` updates the saved offset instead of
   * accumulating duplicate rows.
   *
   * On failure the transaction is rolled back and the StreamingContext is
   * stopped, so no data is processed past an uncommitted offset.
   *
   * @param ssc         streamingContext, stopped when the commit fails
   * @param appId       app id
   * @param groupId     group id
   * @param offsetRange per-partition offset ranges of the processed RDD
   */
  def sendOffsetToMySQL(ssc: StreamingContext, appId: String, groupId: String, offsetRange: Array[OffsetRange]) = {
    var connection: Connection = null
    var pstm: PreparedStatement = null
    try {
      connection = MetadataJDBCUtil.getConn()
      // One transaction for the whole batch.
      connection.setAutoCommit(false)
      pstm = connection.prepareStatement("INSERT INTO pre_kafka_offset (`app_group_id`,`topic_partition`,`offset`) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE `offset` = ? ")
      for (range <- offsetRange) {
        // untilOffset = first offset NOT yet processed; restart resumes here.
        pstm.setString(1, appId + "|" + groupId)
        pstm.setString(2, range.topic + "|" + range.partition)
        pstm.setLong(3, range.untilOffset)
        pstm.setLong(4, range.untilOffset)
        pstm.executeUpdate()
      }
      connection.commit()
    } catch {
      case NonFatal(e) =>
        e.printStackTrace()
        // FIX: guard against NPE — if getConn() itself threw, `connection`
        // is still null; rollback can also fail on a dead link.
        if (connection != null) {
          try connection.rollback() catch { case NonFatal(_) => }
        }
        // Offsets were not persisted: stop streaming to avoid moving past
        // an uncommitted offset.
        ssc.stop(true)
    } finally {
      // FIX: guard pstm.close() so a close failure cannot leak the connection.
      if (pstm != null) {
        try pstm.close() catch { case NonFatal(_) => }
      }
      if (connection != null) {
        // FIX: restore the default commit mode before handing the connection
        // back (NOTE(review): releaseCon looks pool-backed — confirm; leaving
        // autoCommit=false would poison the next borrower).
        try connection.setAutoCommit(true) catch { case NonFatal(_) => }
        MetadataJDBCUtil.releaseCon(connection)
      }
    }
  }
}
