package com.zhang.gmall.util

import org.apache.kafka.common.TopicPartition

import java.util
import org.apache.spark.streaming.kafka010.OffsetRange
import redis.clients.jedis.Jedis

import scala.collection.mutable

/**
 * @title: 手动保存offset
 * @author: zhang
 * @date: 2022/3/21 17:05
 *        每次消费数据的时候从redis读取offset信息，数据消费完成后手动保存offset到redis
 */
object MyOffsetUtil {

  /**
   * Persists the end offsets of a consumed batch to Redis.
   *
   * Storage layout: one Redis hash per (topic, group) at key `offsets:<topic>:<groupId>`,
   * with field = partition id and value = `untilOffset` (the exclusive end offset,
   * i.e. the position the next batch should start from).
   *
   * No-op when `offsetRanges` is null or empty.
   *
   * @param topic        Kafka topic the offsets belong to
   * @param groupId      consumer group id
   * @param offsetRanges per-partition ranges consumed by the finished batch
   */
  def saveOffset(topic: String, groupId: String, offsetRanges: Array[OffsetRange]): Unit = {
    if (offsetRanges != null && offsetRanges.nonEmpty) {
      // Jedis#hset wants a java.util.Map, so build one directly.
      val offsets = new util.HashMap[String, String]()
      for (offsetRange <- offsetRanges) {
        offsets.put(offsetRange.partition.toString, offsetRange.untilOffset.toString)
      }
      println("提交offset：" + offsets)
      val jedis: Jedis = JedisUtil.getJedis()
      try {
        val redisKey = s"offsets:$topic:$groupId"
        jedis.hset(redisKey, offsets)
      } finally {
        // Always return the connection, even if hset throws,
        // otherwise the pool leaks one connection per failure.
        jedis.close()
      }
    }
  }

  /**
   * Reads previously saved offsets from Redis in the shape Spark Streaming expects
   * (`Map[TopicPartition, Long]`) for `ConsumerStrategies.Subscribe`.
   *
   * Returns an empty map when nothing has been saved yet (hgetAll on a missing
   * key yields an empty hash, never null).
   *
   * @param topic   Kafka topic to look up
   * @param groupId consumer group id
   * @return partition -> starting offset for the next batch
   */
  def getOffset(topic: String, groupId: String): Map[TopicPartition, Long] = {
    val jedis: Jedis = JedisUtil.getJedis()
    try {
      val redisKey = s"offsets:$topic:$groupId"
      val offsets: util.Map[String, String] = jedis.hgetAll(redisKey)
      println("读取到offset：" + offsets)
      import scala.collection.JavaConverters._
      // Transform the Redis hash (partition -> offset, both strings)
      // into the typed map Spark requires.
      offsets.asScala.map { case (partition, offset) =>
        new TopicPartition(topic, partition.toInt) -> offset.toLong
      }.toMap
    } finally {
      // Release the connection even if hgetAll or the conversion fails.
      jedis.close()
    }
  }


}
