package com.nepu.spark.realtime.util

import java.util

import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import redis.clients.jedis.Jedis

import scala.collection.mutable

/**
  * Maintains Kafka consumer offsets in Redis.
  *
  * Offsets are stored as a Redis hash keyed by "offset:&lt;topic&gt;:&lt;groupId&gt;",
  * with one hash field per partition (field = partition id, value = offset).
  *
  * @author chenshuaijun
  * @create 2022-10-30 14:18
  */
object KafkaOffsetUtils {

  /** Builds the Redis hash key for a topic/consumer-group pair. */
  private def offsetKey(topic: String, groupId: String): String =
    s"offset:$topic:$groupId"

  /**
    * Reads the Kafka offsets maintained in Redis for the given topic and group.
    *
    * @param topic   the Kafka topic
    * @param groupId the consumer group id
    * @return a map of TopicPartition -> offset; empty when no offsets are stored
    */
  def getOffset(topic: String, groupId: String): Map[TopicPartition, Long] = {
    val jedis: Jedis = MyRedisUtils.getJedisClient()
    try {
      // hgetAll returns an empty java.util.Map when the key does not exist,
      // so a missing key naturally yields an empty result map.
      val offsetMap: util.Map[String, String] = jedis.hgetAll(offsetKey(topic, groupId))
      import scala.collection.JavaConverters._
      offsetMap.asScala.map {
        case (partition, offset) =>
          new TopicPartition(topic, partition.toInt) -> offset.toLong
      }.toMap
    } finally {
      // Always return the connection to the pool, even if hgetAll throws.
      jedis.close()
    }
  }

  /**
    * Persists the given offset ranges to Redis for the topic/group pair.
    *
    * Stores each partition's `untilOffset` (the next offset to consume) as a
    * field of the Redis hash. Does nothing when `offsetRange` is null or empty.
    *
    * @param topic       the Kafka topic
    * @param groupId     the consumer group id
    * @param offsetRange the offset ranges produced by the current micro-batch
    */
  def saveOffset(topic: String, groupId: String, offsetRange: Array[OffsetRange]): Unit = {
    // Structure: group + topic + partition => offset
    if (offsetRange != null && offsetRange.nonEmpty) {
      val parAndOffset = new util.HashMap[String, String]()
      for (elem <- offsetRange) {
        // untilOffset is exclusive: it is the first offset of the next batch.
        parAndOffset.put(elem.partition.toString, elem.untilOffset.toString)
      }
      // Offsets are kept in a Redis hash keyed by topic + group.
      val jedis: Jedis = MyRedisUtils.getJedisClient()
      try {
        println(s"提交的offset: $parAndOffset")
        jedis.hset(offsetKey(topic, groupId), parAndOffset)
      } finally {
        // Always return the connection to the pool, even if hset throws.
        jedis.close()
      }
    }
  }

}
