package com.app.kafka

import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

/**
 * Kafka partition offsets managed externally (in Redis) instead of relying on
 * Kafka's automatic offset commits.
 */
object KafkaRedis {

  /**
   * Entry point: builds a direct Kafka stream that resumes from offsets stored
   * in Redis, and writes the processed offset ranges back to Redis after each
   * non-empty batch. Auto-commit is disabled so Redis is the single source of
   * truth for consumer progress.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    // Cap the per-partition ingest rate so a large offset backlog in Redis
    // cannot overwhelm the first batches after a restart.
    conf.set("spark.streaming.kafka.maxRatePerPartition", "10000")

    val ssc = new StreamingContext(conf, Seconds(1))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "localhost:9092,anotherhost:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "hello",
      // Only used when Redis has no stored offset for a partition.
      "auto.offset.reset" -> "latest",
      // Offsets are committed manually to Redis below, never auto-committed to Kafka.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("topicA")

    // Resume from the offsets previously committed to Redis for this consumer group.
    val fromOffsets: Map[TopicPartition, Long] =
      RedisDataProcess.selectOffsetsFromRedisDatabase(topics, kafkaParams("group.id").toString)

    // Assign (rather than Subscribe) so the stream starts exactly at the
    // Redis-recorded offsets for each partition.
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      ConsumerStrategies.Assign[String, String](fromOffsets.keys.toList, kafkaParams, fromOffsets)
    )

    stream.foreachRDD { (rdd, time) =>
      if (!rdd.isEmpty()) {
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Persist the end offsets of this batch to Redis.
        // NOTE(review): offsets are stored before any results are written, which
        // yields at-most-once processing if a later step fails. For exactly-once
        // semantics, write results and offsets in a single Redis transaction,
        // asserting that the stored end offsets match this batch's begin offsets
        // — confirm which guarantee is intended here.
        RedisDataProcess.updateOffsetsInRedisDatabase(offsetRanges, kafkaParams("group.id").toString)
      }
    }

    // BUG FIX: the context was never started, so foreachRDD above was only
    // registered and the application exited without processing anything.
    ssc.start()
    ssc.awaitTermination()
  }
}
