package com.shujia.spark.streaming

import java.util

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import redis.clients.jedis.Jedis

import scala.collection.mutable


object Demo7Direct {
  /**
    * Direct-stream Kafka consumer that manages its own offsets in Redis.
    *
    * Flow per run: read last-committed offsets from Redis -> subscribe from
    * those offsets -> for each batch, process records then write the new
    * end-offsets back to Redis (at-least-once semantics: data first, offsets
    * second).
    */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setAppName("streaming")
      .setMaster("local[2]")

    /**
      * Create the streaming context; the batch interval controls how often a
      * micro-batch is computed.
      */
    val ssc = new StreamingContext(conf, Durations.seconds(5))

    // Single source of truth for the consumer group and topic. Previously the
    // group id in kafkaParams ("abdsdsf") differed from the one used to build
    // the Redis key ("aasdasd"), so saved offsets were keyed under a group
    // unrelated to the actual consumer group.
    val groupId = "aasdasd"
    val topicName = "test_topic1"
    val topics = Array(topicName)

    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "master:9092,node1:9092,node2:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      // Only consulted when no stored offset exists for a partition.
      "auto.offset.reset" -> "earliest",
      // Offsets are committed manually to Redis below, never auto-committed.
      "enable.auto.commit" -> "false"
    )

    // Redis hash key layout: "<groupId>:<topic>" -> { partition -> untilOffset }
    val key: String = groupId + ":" + topicName

    /**
      * Read the consumer offsets previously stored in Redis so the stream
      * resumes where the last run stopped. Empty on the very first run.
      */
    val redis = new Jedis("master", 6379)
    val partitionOffsets: Map[TopicPartition, Long] =
      try {
        val stored: util.Map[String, String] = redis.hgetAll(key)
        import scala.collection.JavaConverters._
        stored.asScala.map { case (partition, offset) =>
          new TopicPartition(topicName, partition.toInt) -> offset.toLong
        }.toMap
      } finally {
        // Fix: this connection was previously never closed (leak).
        redis.close()
      }

    println(partitionOffsets)

    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      // An empty offset map falls back to auto.offset.reset ("earliest").
      Subscribe[String, String](topics, kafkaParams, partitionOffsets)
    )

    stream.foreachRDD(rdd => {
      // User processing logic: print every record value (runs on executors).
      rdd.map(_.value()).foreach(println)

      // Offset ranges are only available on the RDD produced directly by
      // createDirectStream, hence the cast.
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      /**
        * Persist the consumed offsets to Redis.
        * key   : groupId:topic
        * field : partition, value: untilOffset
        *
        * Start redis with: ./redis-server redis.conf
        */
      // The foreachRDD body executes on the driver, so a driver-side
      // connection is appropriate here.
      val jedis = new Jedis("master", 6379)
      try {
        for (offsetRange <- offsetRanges) {
          val fromOffset: Long = offsetRange.fromOffset
          val partition: Int = offsetRange.partition
          val topic: String = offsetRange.topic
          val untilOffset: Long = offsetRange.untilOffset
          println(topic + "\t" + partition + "\t" + fromOffset + "\t" + untilOffset)

          // Save the end offset of this batch for the partition.
          jedis.hset(key, partition.toString, untilOffset.toString)
        }
      } finally {
        // Fix: previously leaked one connection per batch.
        jedis.close()
      }
    })

    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }
}
