package cn.lagou.homework1

import java.util

import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}

import scala.collection.mutable

/**
 * Persists and restores Kafka consumer offsets in Redis so a Spark Streaming
 * job can resume from where it left off after a restart.
 *
 * Storage layout: one Redis hash per (topic, groupId) under the key
 * `kafka:topic:<topic>:<groupId>`, mapping partition number -> untilOffset.
 */
object OffsetsUtils {
  // Redis connection parameters
  private val redisHost = "192.168.71.100"
  private val redisPort = 6379

  // Pool configuration — bounded so a misbehaving job cannot exhaust Redis
  private val config = new JedisPoolConfig
  // Maximum number of idle connections kept in the pool
  config.setMaxIdle(5)
  // Maximum total number of connections
  config.setMaxTotal(10)

  // 10000 ms connection timeout
  private val pool = new JedisPool(config, redisHost, redisPort, 10000)

  /** Borrows a connection from the pool; the caller MUST close() it to return it. */
  private def getRedisConnection: Jedis = pool.getResource

  private val topicPrefix = "kafka:topic"

  /** Builds the Redis key for a topic/group pair: kafka:topic:<topic>:<groupId>. */
  private def getKey(topic: String, groupid: String) = s"$topicPrefix:$topic:$groupid"

  /**
   * Loads the stored offsets for the given topics and consumer group.
   *
   * @param topics  Kafka topic names to look up
   * @param groupId consumer group id the offsets were saved under
   * @return a map of TopicPartition -> offset; topics with no saved state
   *         simply contribute no entries
   */
  def getOffsetsFromRedis(topics: Array[String], groupId: String): Map[TopicPartition, Long] = {
    val jedis: Jedis = getRedisConnection
    try {
      import scala.collection.JavaConverters._
      topics.flatMap { topic =>
        val key = getKey(topic, groupId)
        // Hash fields are partition numbers, values are offsets (both stored as strings);
        // assumes only well-formed numeric entries were written under this key
        jedis.hgetAll(key).asScala.map { case (partition, offset) =>
          new TopicPartition(topic, partition.toInt) -> offset.toLong
        }
      }.toMap
    } finally {
      // Always return the connection to the pool, even if hgetAll or parsing throws;
      // otherwise repeated failures would leak connections and starve the pool (max 10)
      jedis.close()
    }
  }

  /**
   * Saves the given offset ranges to Redis, one hash per topic.
   *
   * @param offsets offset ranges produced by the just-processed batch
   * @param groupId consumer group id to save the offsets under
   */
  def saveOffsetsToRedis(offsets: Array[OffsetRange], groupId: String): Unit = {
    val jedis: Jedis = getRedisConnection
    try {
      import scala.collection.JavaConverters._
      // Group ranges by topic so each topic is written with a single HMSET
      offsets
        .map(range => (range.topic, (range.partition.toString, range.untilOffset.toString)))
        .groupBy(_._1)
        .foreach { case (topic, partitionOffsets) =>
          val key = getKey(topic, groupId)
          // Hash payload: partition -> untilOffset
          val fields: util.Map[String, String] = partitionOffsets.map(_._2).toMap.asJava
          jedis.hmset(key, fields)
        }
    } finally {
      // Return the connection even if a write fails
      jedis.close()
    }
  }
}
