package com.cmnit.gatherdata.modules.utils

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util

import com.cmnit.gatherdata.utils.{ConfigurationManager, PhoenixUtils}
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
import org.apache.kafka.common.{PartitionInfo, TopicPartition}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, KafkaUtils, OffsetRange}

import scala.collection.mutable

/**
 * kafka工具类
 * 1.获取实时流对象
 * 2.offset维护
 *
 * @author chan
 * @version 1.2
 * @since 2022.3.3
 */
/**
 * Kafka utility:
 * 1. builds a direct Kafka input stream whose starting offsets are restored
 *    from the Phoenix/HBase table `offset_topic`
 * 2. persists processed offsets back to that table
 *
 * @author chan
 * @version 1.2
 * @since 2022.3.3
 */
object KafkaStreamUtil {

  /**
   * Create a direct Kafka stream, resuming from the offsets stored in the
   * Phoenix table `offset_topic` when they are still valid. If the broker's
   * earliest (or, when configured, latest) offset is ahead of the stored one
   * — e.g. because the data has expired — the broker-side offset wins.
   *
   * @param streamingContext      active StreamingContext
   * @param kafkaBootstrapServers Kafka broker list ("host:port,...")
   * @param zkConnect             ZooKeeper connect string (forwarded as a consumer param)
   * @param topic                 comma-separated list of topic names
   * @param groupId               Kafka consumer group id
   * @return direct input stream positioned at the resolved offsets
   */
  def getStream(streamingContext: StreamingContext, kafkaBootstrapServers: String, zkConnect: String, topic: String, groupId: String): InputDStream[ConsumerRecord[String, String]] = {
    val saslJass = ConfigurationManager.getProperty("sasl.jaas.config")
    val autoOffsetReset = ConfigurationManager.getProperty("auto.offset.reset")
    val topics = topic.split(",").toSet

    // Consumer parameters. Auto-commit is disabled on purpose: offsets are
    // committed manually to Phoenix via updateOffset after each batch.
    val kafkaParams = Map[String, String](
      "bootstrap.servers" -> kafkaBootstrapServers,
      "group.id" -> groupId,
      "enable.auto.commit" -> "false",
      "auto.offset.reset" -> autoOffsetReset,
      "zookeeper.connect" -> zkConnect,
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "security.protocol" -> "SASL_PLAINTEXT",
      "sasl.mechanism" -> "GSSAPI",
      "sasl.kerberos.service.name" -> "kafka",
      "sasl.jaas.config" -> saslJass
    )

    // Explicit JavaConverters instead of the deprecated implicit
    // JavaConversions, so every Java/Scala boundary is visible.
    import scala.collection.JavaConverters._

    // FIX: key/value type parameters must match the configured
    // StringDeserializer (the original declared KafkaConsumer[Long, String]).
    val consumer: KafkaConsumer[String, String] =
      new KafkaConsumer[String, String]((kafkaParams: Map[String, Object]).asJava)
    val offsets: mutable.HashMap[TopicPartition, Long] = mutable.HashMap[TopicPartition, Long]()
    val partitionList: util.ArrayList[TopicPartition] = new util.ArrayList[TopicPartition]

    try {
      var connect: Connection = null
      var pstmt: PreparedStatement = null
      var result: ResultSet = null
      try {
        connect = PhoenixUtils.getconnection
        for (topic <- topics) {
          val valueFields: String = topic + "," + groupId
          println("topic&groupid:" + valueFields)

          // Restore the offsets previously saved for this topic/group.
          pstmt = PhoenixUtils.queryHbaseByPhoenix("select * from offset_topic where topic = ? and groupId = ?", valueFields, connect)
          result = pstmt.executeQuery
          while (result.next) {
            // partition number
            val p = result.getString("partitionNum")
            // stored offset
            val o = result.getString("untilOffset")
            offsets.put(new TopicPartition(topic, p.toInt), o.toLong)
          }

          // Collect every partition the broker reports for this topic.
          for (info <- consumer.partitionsFor(topic).asScala) {
            partitionList.add(new TopicPartition(topic, info.partition))
          }
        }
      } catch {
        // Best effort: if the offset table cannot be read we fall back to the
        // broker-side offsets resolved below. Print the full stack trace so
        // the failure is not silently lost (the original only printed e).
        case e: Exception => e.printStackTrace()
      } finally {
        PhoenixUtils.close(connect, pstmt, result)
      }

      // FIX: assign and seek once for the complete partition list. The
      // original re-assigned the still-growing list on every loop iteration;
      // the final consumer state is identical, minus redundant broker calls.
      consumer.assign(partitionList)
      consumer.seekToBeginning(partitionList)
      // Optionally discard all historical data and start from the end.
      if ("latest" == ConfigurationManager.getProperty("offset.initial.position"))
        consumer.seekToEnd(partitionList)

      // If the broker-side initial offset is >= the locally stored one
      // (e.g. the stored offset points at expired data), use the broker's.
      for (partition <- partitionList.asScala) {
        val kafkaOffset: Long = consumer.position(partition)
        val localOffset: Long = offsets.getOrElse(partition, 0L)
        if (kafkaOffset >= localOffset) offsets.put(partition, kafkaOffset)
      }
    } finally {
      // FIX: the probe consumer was never closed (socket/thread leak).
      consumer.close()
    }

    println("offsets:" + offsets)
    // Connect the stream at the resolved per-partition offsets.
    KafkaUtils.createDirectStream(
      streamingContext,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams, offsets)
    )
  }

  /**
   * Persist the batch's offset ranges to the Phoenix table `offset_topic`
   * (one upsert per topic-partition), so the next run can resume from them.
   *
   * @param rdd     a KafkaRDD straight from the direct stream; must implement
   *                HasOffsetRanges (the cast fails on transformed RDDs)
   * @param groupId consumer group id the offsets belong to
   */
  def updateOffset(rdd: RDD[ConsumerRecord[String, String]], groupId: String): Unit = {
    // Extract the per-partition offset ranges carried by the KafkaRDD.
    val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
    ranges.foreach(offset => {
      // Columns of the upsert.
      val updateFields: String = "topic,partitionNum,untilOffset,groupId"
      // Matching values, comma-joined in the same order.
      val valueFields: String = offset.topic + "," + offset.partition + "," + offset.untilOffset + "," + groupId
      print("offset表更新：")
      PhoenixUtils.replaceHbaseByPhoenix("offset_topic", updateFields, valueFields)
    })
  }

}
