package com.whiteseason.streaming

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig, Transaction}

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.Try

object KafkaSparkStreamingDemo {
  /**
   * Reads the last-known offsets for `topic`/`groupId` back out of Redis.
   *
   * Keys are written by the consumer as "offset_<group>_<topic>_<partition>" with a
   * value of either "0_<offset>" (read from Kafka but not yet processed) or
   * "1_<offset>" (fully processed).
   *
   * @param topic   Kafka topic name
   * @param groupId Kafka consumer group id
   * @return map of partition -> offset to resume from; empty when Redis holds no state
   */
  def getOffset(topic: String, groupId: String): mutable.Map[TopicPartition, Long] = {
    val fromOffset = mutable.Map[TopicPartition, Long]()
    val jedis: Jedis = getConnection
    try {
      // Trailing "_" in the pattern prevents matching another topic that merely has
      // this topic's name as a prefix (keys always carry "_<partition>" at the end).
      val keys = jedis.keys(s"offset_${groupId}_${topic}_*")
      keys.asScala.foreach { key =>
        val value = jedis.get(key)
        // Null-check FIRST: the key can expire between KEYS and GET (the consumer
        // stores the "1_" state with a short setex TTL), making GET return null.
        // The original checked null AFTER calling startsWith, which would NPE.
        if (value != null) {
          val offset: Long =
            if (value.startsWith("1_"))
              // Already processed: resume from the next offset.
              value.substring(2).toLong + 1
            else
              // Read but not processed ("0_" prefix): re-consume the same offset.
              value.substring(2).toLong
          // Extract the partition number from the key suffix; default to 0 if the
          // key is malformed.
          val partition = Try(key.split(s"offset_${groupId}_${topic}_").apply(1)).getOrElse("0")
          fromOffset.put(new TopicPartition(topic, partition.toInt), offset)
        }
      }
    } finally {
      // Always return the connection to the pool, even if a Redis call throws.
      jedis.close()
    }
    fromOffset
  }

  /**
   * Entry point: consumes a Kafka topic with a Spark direct stream, resuming from
   * offsets previously persisted to Redis by this same job, and records per-record
   * consumption state ("0_" = read, "1_" = processed) back into Redis.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("kafkaSparkStreaming")
    // Set test memory for local runs, otherwise Spark reports insufficient memory.
    conf.set("spark.testing.memory", "2147480000")
    val context = new SparkContext(conf)
    context.setLogLevel("WARN")
    // Streaming context with a 5-second micro-batch interval.
    val ssc = new StreamingContext(context , Seconds(5))

    var locationStrategy: LocationStrategy = LocationStrategies.PreferConsistent
    val brokers = "spark:9092"
    val topic = "spark-kafka"
    val group = "sparkaGroup"
    // Load any offsets previously saved to Redis for this topic/group.
    val offsets = getOffset(topic,group)
    val kafkaParam = Map(
    "bootstrap.servers"-> brokers,
    "key.deserializer" ->classOf[StringDeserializer],
    "value.deserializer"->classOf[StringDeserializer],
    "group.id"->group,
    "auto.offset.reset"-> "latest",
    // Auto-commit is disabled: offsets are committed manually via commitAsync below.
    "enable.auto.commit" ->(false:java.lang.Boolean)
    )
    var consumerStrategy: ConsumerStrategy[String, String] = null
    // If Redis held offsets for this topic, resume from them; otherwise let Kafka
    // decide the starting position ("latest" per auto.offset.reset).
    if(offsets.nonEmpty){
      consumerStrategy = ConsumerStrategies.Subscribe(Array(topic), kafkaParam, offsets)
    }else{
      consumerStrategy = ConsumerStrategies.Subscribe(Array(topic), kafkaParam)
    }

    var resultDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(ssc, locationStrategy, consumerStrategy)

    resultDStream.foreachRDD(iter=>{
      // count() forces an action; skip empty micro-batches.
      if(iter.count() > 0){
        // NOTE(review): this closure runs on executors; a Jedis connection is opened
        // and closed PER RECORD, which is expensive — consider foreachPartition with
        // one connection per partition. Confirm before changing.
        iter.foreach(record =>{
          val jedis: Jedis = getConnection
          // Redis MULTI/EXEC transaction for the state write.
          val transaction: Transaction = jedis.multi()
          val result : String = record.value()
          // Key layout: offset_<group>_<topic>_<partition> (read back by getOffset).
          val key = s"offset_${group}_${record.topic}_${record.partition}"
          // "0_" prefix marks the offset as read from Kafka but not yet processed.
          var value = "0_" + record.offset()
          transaction.set(key,value)
          // Commit the "read" state before processing.
          transaction.exec()
          transaction.close()
          // Process the record (placeholder: just print it).
          println(result)
          // "1_" prefix marks the offset as fully processed.
          value = "1_" + record.offset()
          val transaction2: Transaction = jedis.multi()
          // 10-second TTL keeps offset-state keys from accumulating in Redis.
          // NOTE(review): after expiry getOffset finds nothing and falls back to
          // "latest" — confirm this short TTL is intended for restart recovery.
          transaction2.setex(key,10,value)
          transaction2.exec()
          transaction2.close()
          jedis.close()
        })
        // Also commit the batch's offset ranges back to Kafka asynchronously.
        val ranges: Array[OffsetRange] = iter.asInstanceOf[HasOffsetRanges].offsetRanges
        resultDStream.asInstanceOf[CanCommitOffsets].commitAsync(ranges)
      }
    })

    ssc.start()
    ssc.awaitTermination()

  }
  // Single shared connection pool for the whole application. The original built a
  // brand-new JedisPool on every call and never closed it (a resource leak that
  // defeats pooling), and it called setMaxTotal/setMaxIdle AFTER constructing the
  // pool — too late, since JedisPool copies the config at construction time.
  private lazy val jedisPool: JedisPool = {
    val poolConfig = new JedisPoolConfig()
    // Configure BEFORE building the pool so the limits actually take effect.
    poolConfig.setMaxTotal(20)
    poolConfig.setMaxIdle(20)
    new JedisPool(poolConfig, "spark", 6379)
  }

  /**
   * Borrows an authenticated Jedis connection from the shared pool.
   * Callers must `close()` it to return it to the pool.
   */
  def getConnection: Jedis = {
    val jedis = jedisPool.getResource
    jedis.auth("whiteseason")
    jedis
  }
}
