package com.offcn.bigdata.spark.streaming.p1

import java.time.Duration
import java.util.Properties

import com.offcn.bigdata.util.db.JedisUtil

import org.apache.kafka.clients.consumer.{ConsumerRecord, ConsumerRecords, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.JavaConverters._
import scala.collection.{JavaConversions, mutable}

/**
  * With auto-committed offsets, a failure while writing output leads to
  * at-most-once semantics (the offset may be committed for data that was never
  * delivered). To get closer to exactly-once we manage offsets manually and
  * commit them only AFTER the output action; ideally the offset update and the
  * output would share one transaction (all-or-nothing).
  *
  * Approach:
  *   1. Read the previously stored offsets from external storage (Redis).
  *   2. If none exist, start from the position chosen by "auto.offset.reset";
  *      otherwise resume from the stored offsets.
  *   3. Run the business logic.
  *   4. Store the new offsets (manual commit).
  *
  * NOTE(review): storing offsets after a non-transactional output still yields
  * at-least-once, not exactly-once — records between the output and the store
  * may be replayed after a crash.
  */
object _06StreamingManageOffsetWithRedisOps {

    def main(args: Array[String]): Unit = {
        // The direct Kafka stream has no receiver, so a single local thread works;
        // "local[*]" would still be more comfortable for local experimentation.
        val conf = new SparkConf().setMaster("local").setAppName("StreamingManageOffset")
        val ssc = new StreamingContext(conf, Seconds(2))

        val topics = "spark".split(",")
        val kafkaParams = Map[String, Object](
            "bootstrap.servers" -> "bigdata01:9092,bigdata02:9092,bigdata03:9092",
            "key.deserializer" -> classOf[StringDeserializer].getName,
            "value.deserializer" -> classOf[StringDeserializer].getName,
            "group.id" -> "spark-kafka-group-2",
            // Only consulted when no stored offset exists for a partition.
            "auto.offset.reset" -> "earliest",
            // Offsets are committed manually (stored in Redis) after the output.
            "enable.auto.commit" -> "false"
        )

        val messages: InputDStream[ConsumerRecord[String, String]] = createMsg(ssc, topics, kafkaParams)

        messages.foreachRDD((rdd: RDD[ConsumerRecord[String, String]], bTime) => {
            if (!rdd.isEmpty()) {
                println("-------------------------------------------")
                println(s"Time: $bTime")
                println("-------------------------------------------")
                rdd.foreach(record => println(record))
                // Persist offsets only after the output above completed. A crash
                // between the output and this call replays the batch (at-least-once).
                storeOffsets(rdd.asInstanceOf[HasOffsetRanges].offsetRanges, kafkaParams("group.id").toString)
            }
        })

        ssc.start()
        ssc.awaitTermination()
    }

    /**
      * Builds the direct Kafka input stream, resuming from the offsets stored in
      * Redis when any exist; otherwise the consumer falls back to
      * "auto.offset.reset".
      *
      * @param ssc         the streaming context to attach the stream to
      * @param topics      topics to subscribe to
      * @param kafkaParams consumer configuration (must contain "group.id")
      * @return the direct input stream of consumer records
      */
    def createMsg(ssc: StreamingContext, topics: Array[String], kafkaParams: Map[String, Object]): InputDStream[ConsumerRecord[String, String]] = {
        // Step 1: read the offsets persisted by the previous run (may be empty).
        val offsets: Map[TopicPartition, Long] = getFromOffsets(topics, kafkaParams("group.id").toString)
        // Step 2: subscribe, seeding the consumer with the stored offsets when available.
        if (offsets.isEmpty) {
            KafkaUtils.createDirectStream[String, String](ssc,
                LocationStrategies.PreferConsistent,
                ConsumerStrategies.Subscribe[String, String](topics, kafkaParams))
        } else {
            KafkaUtils.createDirectStream[String, String](ssc,
                LocationStrategies.PreferConsistent,
                ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsets))
        }
    }

    /**
      * Reads the consumer group's stored offsets from a Redis hash.
      *
      * Layout: key = group id, field = "topic|partition", value = offset.
      * Fields whose topic is not in `topics` are ignored.
      *
      * @param topics topics we are interested in
      * @param group  consumer group id (the Redis hash key)
      * @return stored offsets per topic-partition; empty on the first run
      */
    def getFromOffsets(topics: Array[String], group: String): Map[TopicPartition, Long] = {
        val jedis = JedisUtil.getJedis
        try {
            jedis.hgetAll(group).asScala.flatMap { case (field, offsetStr) =>
                // field format: "topic|partition" — split once and reuse.
                val parts = field.split("\\|")
                val topic = parts(0)
                if (topics.contains(topic))
                    Some(new TopicPartition(topic, parts(1).toInt) -> offsetStr.toLong)
                else
                    None
            }.toMap
        } finally {
            // Always return the connection to the pool, even if parsing fails.
            JedisUtil.release(jedis)
        }
    }

    /**
      * Persists each range's until-offset into the group's Redis hash.
      * field = "topic|partition", value = untilOffset (the next offset to read).
      *
      * @param offsetRanges offset ranges of the batch just processed
      * @param group        consumer group id (the Redis hash key)
      */
    def storeOffsets(offsetRanges: Array[OffsetRange], group: String): Unit = {
        val jedis = JedisUtil.getJedis
        try {
            for (offsetRange <- offsetRanges) {
                val field = s"${offsetRange.topic}|${offsetRange.partition}"
                jedis.hset(group, field, offsetRange.untilOffset.toString)
            }
        } finally {
            // Always return the connection to the pool, even if a write fails.
            JedisUtil.release(jedis)
        }
    }
}
