package cn.lagou.spark.withKafka

import java.util

import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}

import scala.collection.mutable

/**
 * Stores and retrieves Kafka consumer offsets in Redis.
 *
 * Offsets are kept in one Redis hash per (topic, consumer group), keyed
 * `kafka:topic:<topic>:<groupId>`, with the partition id as the hash field
 * and the offset as the value (both stored as strings).
 */
object OffsetsRedisUtils {
  // Redis connection parameters
  private val redisHost = "linux122"
  private val redisPort = 6379

  // Connection-pool configuration
  private val config = new JedisPoolConfig
  // Maximum number of idle connections
  config.setMaxIdle(10)
  // Maximum number of total connections
  config.setMaxTotal(30)

  // Pool with a 10-second connection timeout (milliseconds)
  private val pool = new JedisPool(config, redisHost, redisPort, 10000)

  private def getRedisConnection: Jedis = pool.getResource

  private val topicPrefix = "kafka:topic"

  // Redis hash key for a (topic, consumer group) pair
  private def getKey(topic: String, groupId: String): String = s"$topicPrefix:$topic:$groupId"

  /**
   * Reads the saved offsets for the given topics and consumer group from Redis.
   *
   * @param topics  Kafka topics to look up
   * @param groupId consumer group id
   * @return map of TopicPartition -> last saved offset; partitions with no
   *         saved entry are simply absent from the result
   */
  def getOffsetsFromRedis(topics: Array[String], groupId: String): Map[TopicPartition, Long] = {
    val jedis: Jedis = getRedisConnection
    try {
      import scala.collection.JavaConverters._

      topics.flatMap { topic =>
        val key: String = getKey(topic, groupId)
        // Hash fields are partition ids, values are offsets (both stored as strings)
        jedis.hgetAll(key)
          .asScala
          .map { case (partition, offset) =>
            new TopicPartition(topic, partition.toInt) -> offset.toLong
          }
      }.toMap
    } finally {
      // Always return the connection to the pool, even if Redis access
      // or string-to-number parsing fails
      jedis.close()
    }
  }

  /**
   * Saves the batch's ending offsets to Redis, one hash per topic.
   *
   * Uses `untilOffset` (the exclusive end of each range) so the next run
   * resumes after the records already processed.
   *
   * @param offsets offset ranges produced by the just-processed batch
   * @param groupId consumer group id
   */
  def saveOffsetsToRedis(offsets: Array[OffsetRange], groupId: String): Unit = {
    val jedis: Jedis = getRedisConnection
    try {
      import scala.collection.JavaConverters._

      offsets
        .map(range => (range.topic, range.partition -> range.untilOffset))
        .groupBy(_._1)
        .foreach { case (topic, ranges) =>
          // partition id -> until-offset, both as strings, for the Redis hash
          val partitionToOffset: Map[String, String] =
            ranges.map { case (_, (partition, untilOffset)) =>
              partition.toString -> untilOffset.toString
            }.toMap

          jedis.hmset(getKey(topic, groupId), partitionToOffset.asJava)
        }
    } finally {
      // Always return the connection to the pool, even if hmset fails
      jedis.close()
    }
  }
}








