package com.atguigu.realtime.util

import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import org.json4s.jackson.{JsonMethods, Serialization}
import redis.clients.jedis.Jedis

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer

/**
 * Author atguigu
 * Date 2020/11/13 15:09
 */
object OffsetManager {
    /**
     * Reads the saved offsets for a consumer group + topic from MySQL
     * (table `ads_offset`).
     *
     * @param groupId consumer group id used as the lookup key
     * @param topic   Kafka topic name
     * @return map of TopicPartition -> committed offset; empty if nothing saved yet
     */
    def readOffsetsFromMysql(groupId: String, topic: String): Map[TopicPartition, Long] = {
        // NOTE(review): credentials are hard-coded in the JDBC URL; move them to configuration.
        val url = "jdbc:mysql://hadoop162:3306/gmall_result?characterEncoding=utf-8&useSSL=false&user=root&password=aaaaaa"
        val sql =
            """
              |select
              | *
              |from ads_offset
              |where group_id=? and topic=?
              |""".stripMargin
        JDBCUtil
            .query(url, sql, List(groupId, topic))
            .map((row: Map[String, Object]) => {
                val partition = row("partition_id").toString.toInt
                val offset = row("partition_offset").toString.toLong
                new TopicPartition(topic, partition) -> offset
            })
            .toMap
    }
    
    // Writing offsets back to MySQL is intentionally not implemented here:
    // it is done atomically together with the result data write.
    
    /**
     * Saves the end offsets of the given ranges to Redis as a hash:
     * key = "offset:<groupId>:<topic>", field = partition id, value = untilOffset.
     *
     * @param offsetRanges offset ranges of the batch just processed
     * @param groupId      consumer group id
     * @param topic        Kafka topic name
     */
    def saveOffsets(offsetRanges: ListBuffer[OffsetRange], groupId: String, topic: String): Unit = {
        val client: Jedis = MyRedisUtil.getClient
        try {
            val key = s"offset:${groupId}:${topic}"
            val fieldAndValue = offsetRanges
                .map(offsetRange => offsetRange.partition.toString -> offsetRange.untilOffset.toString)
                .toMap
                .asJava
            // HMSET with an empty map is a Redis error; skip when the batch had no ranges.
            if (!fieldAndValue.isEmpty) {
                client.hmset(key, fieldAndValue)
            }
            println("保存偏移量 topic_partition-> offset: " + fieldAndValue)
        } finally {
            // Always return the connection, even if hmset throws.
            client.close()
        }
    }
    
    /**
     * Reads the starting offsets for a single topic from Redis
     * (hash written by the single-topic saveOffsets overload).
     *
     * @param groupId consumer group id
     * @param topic   Kafka topic name
     * @return map of TopicPartition -> offset; empty if the key does not exist
     */
    def readOffsets(groupId: String, topic: String): Map[TopicPartition, Long] = {
        val key = s"offset:${groupId}:${topic}"
        val client: Jedis = MyRedisUtil.getClient
        println("读取开始的offsets")
        try {
            val topicPartitionAndOffset: Map[TopicPartition, Long] = client
                .hgetAll(key)
                .asScala
                .map {
                    case (partition, offset) =>
                        new TopicPartition(topic, partition.toInt) -> offset.toLong
                }
                .toMap
            println("初始偏移量: " + topicPartitionAndOffset)
            topicPartitionAndOffset
        } finally {
            client.close()
        }
    }
    
    /**
     * Reads the starting offsets for several topics consumed by one stream.
     * Redis layout: key = "offset:<groupId>", one hash field per topic whose
     * value is a JSON object of {partition: offset}.
     *
     * @param groupId consumer group id
     * @param topic   topics of the stream (only groupId is used for the key;
     *                the topic names come from the hash fields themselves)
     * @return map of TopicPartition -> offset across all stored topics
     */
    def readOffsets(groupId: String, topic: Seq[String]): Map[TopicPartition, Long] = {
        val key = s"offset:${groupId}"
        val client: Jedis = MyRedisUtil.getClient
        println("多个topic一个流:  读取开始的offsets")
        try {
            implicit val formats = org.json4s.DefaultFormats
            val topicPartitionAndOffset = client
                .hgetAll(key)
                .asScala
                .flatMap {
                    // topicName is the hash field; renamed to avoid shadowing the parameter
                    case (topicName, partitionAndOffset: String) =>
                        JsonMethods
                            .parse(partitionAndOffset)
                            .extract[Map[Int, Long]]
                            .map {
                                case (partition, offset) =>
                                    new TopicPartition(topicName, partition) -> offset
                            }
                }
                .toMap
            println("多个topic一个流:  初始偏移量: " + topicPartitionAndOffset)
            topicPartitionAndOffset
        } finally {
            client.close()
        }
    }
    
    /*
    Redis layout for the multi-topic case:
    key:   offset:<groupId>
    value: hash
           field (topic)   value (JSON: partition -> offset)
           ods_xxx         {"1": 1000, "0": 2000}
     */
    /**
     * Saves the end offsets of a multi-topic stream to Redis, one hash field
     * per topic, value serialized as JSON {partition: offset}.
     *
     * @param offsetRanges offset ranges of the batch just processed (may span topics)
     * @param groupId      consumer group id (the stream may consume several topics)
     * @param topics       topics of the stream (not used for the key; kept for symmetry)
     */
    def saveOffsets(offsetRanges: ListBuffer[OffsetRange], groupId: String, topics: Seq[String]): Unit = {
        val client: Jedis = MyRedisUtil.getClient
        try {
            val key = s"offset:${groupId}"
            implicit val formats = org.json4s.DefaultFormats
            val fieldAndValue = offsetRanges
                .groupBy(_.topic)
                .map {
                    case (topic, ranges) =>
                        val partitionToOffset = ranges
                            .map(offsetRange => (offsetRange.partition, offsetRange.untilOffset))
                            .toMap
                        (topic, Serialization.write(partitionToOffset))
                }
                .asJava
            // HMSET with an empty map is a Redis error; skip when the batch had no ranges.
            if (!fieldAndValue.isEmpty) {
                client.hmset(key, fieldAndValue)
            }
            println("多个topic一个流_保存偏移量 topic_partition-> offset: " + fieldAndValue)
        } finally {
            // Always return the connection, even if serialization or hmset throws.
            client.close()
        }
    }
}

/*
Offsets are persisted to Redis (see the saveOffsets/readOffsets overloads above);
the MySQL path (readOffsetsFromMysql) is written together with the result data.
 */