package com.spark.demo
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import redis.clients.jedis.Jedis
import com.spark.demo.utils

import scala.collection.mutable
object sparkStreamAndKafka {

  /**
   * Entry point: consumes a Kafka topic via a Spark Streaming direct stream,
   * deduplicates records using Redis keys with a 10-minute TTL, and commits
   * Kafka offsets asynchronously after each non-empty batch is processed.
   *
   * Offset semantics: `enable.auto.commit` is disabled and offsets are committed
   * manually with `commitAsync` only after the batch has been handled, giving
   * at-least-once delivery; the Redis existence check suppresses reprocessing
   * of redelivered records within the TTL window.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // 1. Spark configuration: local mode with 2 threads (one for the receiver, one for processing).
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("kafkaSparkStreaming")
    // 2. SparkContext; raise the log level so batch output is readable.
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")
    // 3. StreamingContext with a 5-second micro-batch interval.
    val ssc = new StreamingContext(sc, Seconds(5))
    // Location strategy: PreferConsistent distributes Kafka partitions evenly across executors.
    // (val, not var — never reassigned.)
    val locationStrategy: LocationStrategy = LocationStrategies.PreferConsistent

    println("环境创建完成！")

    val brokers = "hadoop:9092"
    val topic = "topic-demo01"
    val group = "sparkaGroup"
    val kafkaParam = Map(
      "bootstrap.servers" -> brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "latest",
      //      "auto.offset.reset" -> "earliest",
      // Offsets are committed manually via commitAsync below, so auto-commit must stay off.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Consumer strategy: subscribe to the single demo topic with the params above.
    val consumerStrategy: ConsumerStrategy[String, String] =
      ConsumerStrategies.Subscribe(Array(topic), kafkaParam)
    println("消费者创建完成，Redis连接成功，开始消费了。。。。。")

    val resultDStream: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream(ssc, locationStrategy, consumerStrategy)

    resultDStream.foreachRDD(rdd => {
      println("========================================================================================================")
      println(System.currentTimeMillis() + ": ######采集到了一个DStream#########")
      // NOTE: count() triggers an extra Spark job per batch; acceptable for a demo,
      // but consider rdd.isEmpty() on hot paths.
      if (rdd.count() > 0) {

        rdd.foreachPartition(records => {
          // One Jedis connection per partition, obtained from the project's pool helper.
          val jedis: Jedis = utils.getContion()
          try {
            records.foreach(record => {

              val value: String = record.value()
              val offsetLong: Long = record.offset()
              val offset_partition: Int = record.partition()
              val offset_topic: String = record.topic()
              // BUG FIX: join key components with a separator. The original
              // topic + partition + offset concatenation was ambiguous —
              // e.g. (partition 1, offset 23) and (partition 12, offset 3)
              // produced the same key, so distinct records could be skipped
              // as "already seen".
              val key = s"$offset_topic-$offset_partition-$offsetLong"

              if (!jedis.exists(key)) {
                //            println("消费前："+System.currentTimeMillis())
                println(System.currentTimeMillis() + "： 消费到第" + offset_partition + "区的第" + offsetLong + "条数据了！ " +
                  ": value: " + value + " ----offset: " + offsetLong + "----partition: " + offset_partition + "----topic: " + offset_topic)
                // Cache the record VALUE under its (topic, partition, offset) key,
                // expiring after 10 minutes, so redelivered records are not reprocessed.
                jedis.set(key, value)
                jedis.expire(key, 600)
                //            println("消费后："+System.currentTimeMillis())
              }

            })
          } finally {
            // Always return the connection to the pool, even if processing throws —
            // the original leaked the connection on failure.
            jedis.close()
          }
        })

        println(System.currentTimeMillis() + ": #######消费了一个DStream#########")

        // Extract the batch's offset ranges from the direct-stream RDD and commit
        // them asynchronously back to Kafka, now that processing has succeeded.
        val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        resultDStream.asInstanceOf[CanCommitOffsets].commitAsync(ranges)

      }
      println(System.currentTimeMillis() + ": #######提交一个DStream#########")

    })
    println(System.currentTimeMillis() + ": ######启动实施计算程序#########")

    // Start the streaming computation and block until it is stopped or fails.
    ssc.start()
    ssc.awaitTermination()
  }
}
