package com.mjf.gmall.realtime.util

import java.util

import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import redis.clients.jedis.Jedis

import scala.collection.mutable

/**
 * Manual Kafka offset management backed by Redis.
 *
 * Auto-commit flushes offsets on a fixed time interval, which can lose or
 * replay records on failure; saving offsets manually after each batch is
 * fully processed gives precise control over consumption progress.
 *
 * Redis layout: type=hash, key="offset:&lt;topicName&gt;:&lt;groupId&gt;",
 * field=partitionId, value=offset.
 */
object OffsetManager {

  /**
   * Loads the saved offsets for a topic / consumer group from Redis.
   *
   * @param topicName Kafka topic name
   * @param groupId   Kafka consumer group id
   * @return map of TopicPartition -> next offset to consume; empty when
   *         nothing has been saved yet (caller falls back to defaults)
   */
  def getOffset(topicName: String, groupId: String): Map[TopicPartition, Long] = {
    val offsetKey = s"offset:${topicName}:${groupId}"
    val jedis: Jedis = RedisUtil.getJedisClient
    // try/finally so the connection is returned even if hgetAll throws
    // (the original leaked the client on error).
    val offsetMap: util.Map[String, String] =
      try jedis.hgetAll(offsetKey)
      finally jedis.close()

    // Explicit .asScala via JavaConverters instead of the deprecated
    // implicit JavaConversions wildcard import.
    import scala.collection.JavaConverters._
    offsetMap.asScala.map {
      case (partitionId, offset) =>
        println("加载分区偏移量" + partitionId + ":" + offset)
        new TopicPartition(topicName, partitionId.toInt) -> offset.toLong
    }.toMap   // toMap: freeze the mutable view into an immutable Map
  }

  /**
   * Persists the end offsets of a processed batch to Redis.
   *
   * @param topicName    Kafka topic name
   * @param groupId      Kafka consumer group id
   * @param offsetRanges per-partition offset ranges of the batch just handled
   */
  def saveOffset(topicName: String, groupId: String, offsetRanges: Array[OffsetRange]): Unit = {
    // Redis layout: type=hash, key="offset:<topicName>:<groupId>", field=partitionId, value=offset
    val offsetKey = s"offset:${topicName}:${groupId}"
    val offsetMap: util.HashMap[String, String] = new util.HashMap()

    // Convert each OffsetRange to a hash entry; untilOffset is the first
    // offset NOT yet consumed, i.e. where the next batch should resume.
    for (range <- offsetRanges) {
      offsetMap.put(range.partition.toString, range.untilOffset.toString)
      println("写入分区" + range.partition + ":" + range.fromOffset + "-->" + range.untilOffset)
    }

    // hmset rejects an empty map, so skip the round-trip for empty batches.
    // (A freshly constructed HashMap can never be null — dropped that dead check.)
    if (!offsetMap.isEmpty) {
      val jedis: Jedis = RedisUtil.getJedisClient
      // try/finally guarantees the client is closed even if hmset throws.
      try jedis.hmset(offsetKey, offsetMap)
      finally jedis.close()
    }
  }

}
