import java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}

import com.wtw.utils.RedisUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, ConsumerStrategy, HasOffsetRanges, KafkaUtils, LocationStrategies, LocationStrategy, OffsetRange}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}


/**
  * Author: 王庭伟 (student ID 201731102227)
  */

/**
  * Demo: consume a Kafka topic with Spark Streaming's direct stream,
  * deduplicate records via Redis keys, and commit offsets manually
  * (at-least-once semantics).
  */
object KafkaSparkStreamingDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("kafkaSparkStreaming")
    val context = new SparkContext(conf)
    context.setLogLevel("WARN")
    // Streaming context with a 5-second micro-batch interval.
    val ssc = new StreamingContext(context, Seconds(5))

    // Let Spark distribute Kafka partitions evenly across executors.
    val locationStrategy: LocationStrategy = LocationStrategies.PreferConsistent

    val brokers = "192.168.120.201:9092"
    val topic = "spark-kafka"
    val group = "sparkaGroup"
    val kafkaParam = Map(
      "bootstrap.servers" -> brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "latest",
      // Offsets are committed manually via commitAsync below, only after the
      // batch has been processed.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val consumerStrategy: ConsumerStrategy[String, String] =
      ConsumerStrategies.Subscribe(Array(topic), kafkaParam)

    val resultDStream: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream(ssc, locationStrategy, consumerStrategy)

    resultDStream.foreachRDD(rdd => {
      // isEmpty() only inspects the first partition element; count() would
      // scan the whole RDD just to test for emptiness.
      if (!rdd.isEmpty()) {
        // Read the offset ranges on the driver BEFORE running any action on
        // the RDD: only the original KafkaRDD implements HasOffsetRanges.
        val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        rdd.foreach(record => {
          val value: String = record.value()
          // BUG FIX: the dedup key must include topic and partition — a bare
          // offset collides across partitions (offset 5 of partition 0 and
          // offset 5 of partition 1 would share one key, silently dropping
          // one of the records).
          val key = s"${record.topic()}_${record.partition()}_${record.offset()}"

          // The Jedis connection must be obtained inside the task closure:
          // connections are not serializable and cannot be shipped from the
          // driver to the executors.
          val jedis = RedisUtil.getConnection()
          try {
            // A missing key in Redis means this record has not been consumed.
            if (jedis.get(key) == null) {
              // Mark the record consumed, storing its payload under the key.
              jedis.set(key, value)
              // "Consume" the record.
              println(value)
              // Expire after 60s: long enough to survive a crash/restart
              // window, short enough that keys do not pile up in Redis.
              jedis.expire(key, 60)
            }
          } finally {
            // BUG FIX: always return the connection to the pool, even when
            // processing throws — otherwise the pool leaks connections.
            RedisUtil.returnConnection(jedis)
          }
        })

        // Commit on the driver after the batch has been processed (manual
        // commit; see enable.auto.commit=false above).
        resultDStream.asInstanceOf[CanCommitOffsets].commitAsync(ranges)
      }
    })

    // NOTE(review): an earlier foreachPartition variant failed because it
    // referenced `resultDStream` (and cast the partition iterator to
    // HasOffsetRanges) inside the partition closure. DStreams are not
    // serializable, and offset ranges exist only on the driver-side KafkaRDD;
    // both must be handled on the driver, as done above.

    ssc.start()
    ssc.awaitTermination()
  }
}
