package com.fwmagic.spark.streaming

import com.fwmagic.spark.streaming.util.JedisConnectionPool
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.{SparkConf, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._
import redis.clients.jedis.Jedis

/**
  * Spark Streaming integration with Kafka.
  * 1. Add the Kafka integration dependency.
  * 2. Create the DStream via the direct approach (consumes with the low-level API, which is more efficient).
  * Direct approach: the number of RDD partitions corresponds one-to-one with the number of Kafka partitions.
  */
object KafkaStreamingManageOffset {
    def main(args: Array[String]): Unit = {
        // local[*] is for local testing only; the app name is derived from the class name.
        val conf = new SparkConf().setAppName(this.getClass.getSimpleName)
                .setMaster("local[*]")

        // 5-second micro-batch interval.
        val ssc = new StreamingContext(conf, Seconds(5))

        // Reduce log noise if desired.
        //ssc.sparkContext.setLogLevel("WARN")

        val topics = List("ssc-wc")

        val bootstrapServers = "192.168.62.131:9092,192.168.62.132:9092,192.168.62.133:9092"
//        val bootstrapServers = "hd1:9092,hd2:9092,hd3:9092"
//        val bootstrapServers = "localhost:9092"

        // Kafka consumer configuration.
        val kafkaParams = Map[String, String](
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers,
            ConsumerConfig.GROUP_ID_CONFIG -> "ssc-gpb",
            ConsumerConfig.MAX_POLL_RECORDS_CONFIG -> "100",
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "earliest",
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
            // Default is true: offsets auto-commit every 5s to the internal __consumer_offsets topic.
            // We disable that and commit manually after the Redis sink succeeds. NOTE(review): since
            // hincrBy is not idempotent and offsets are committed after the write, this gives
            // at-least-once (not exactly-once) semantics — a batch replayed after a failure may
            // double-increment the counters.
            ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "false",
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"
        )

        // Direct stream: one RDD partition per Kafka partition; offsets are managed manually
        // and the aggregated results are sunk to Redis.
        val kafkaDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(ssc
            , LocationStrategies.PreferConsistent // schedule tasks evenly across executors
            , ConsumerStrategies.Subscribe[String, String](topics, kafkaParams))

        kafkaDStream.foreachRDD((rdd: RDD[ConsumerRecord[String, String]]) => {
            // The RDD's partition count mirrors the topic's partition count (one-to-one mapping).
            println("===>rdd partitions:" + rdd.partitions.length)
            if (!rdd.isEmpty()) {
                // Cast the underlying KafkaRDD to read each partition's offset range (driver side).
                // Each OffsetRange exposes topic, partition, fromOffset and untilOffset, e.g.:
                //   topic:ssc-wc,partition: 0,fromOffset:2, untilOffset:2
                val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

                // Word-count the record values for this batch.
                val lines: RDD[String] = rdd.map(_.value())

                val wordAndOne: RDD[(String, Int)] = lines.flatMap(_.split("\\s")).map((_, 1))

                val reduced: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)

                // Write the per-batch counts into a Redis hash (executor side, one connection
                // per partition). try/finally guarantees the pooled connection is returned even
                // if select/hincrBy throws (the original leaked it on failure).
                reduced.foreachPartition((its: Iterator[(String, Int)]) => {
                    val jedis: Jedis = JedisConnectionPool.getConnection()
                    try {
                        jedis.select(3)
                        its.foreach((tp: (String, Int)) => {
                            jedis.hincrBy("ssc_kafka_adv", tp._1, tp._2)
                        })
                    } finally {
                        jedis.close()
                    }
                })

                // Manually commit the consumed offsets back to Kafka (asynchronous, driver side),
                // only after the batch's output action has been submitted.
                kafkaDStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
            }
        })
        ssc.start()

        ssc.awaitTermination()
    }

}
