package com.fwmagic.spark.streaming

import com.fwmagic.spark.streaming.util.JedisConnectionPool
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

/**
  * Spark Streaming integration with Kafka.
  * 1. Add the Kafka integration dependency.
  * 2. Connect to Kafka directly and create the DStream (consumes via the
  *    low-level API, which is more efficient).
  * Direct approach: RDD partitions map one-to-one to Kafka partitions
  * (the counts are identical).
  */
object KafkaStreamingManageOffsetTransform {
    def main(args: Array[String]): Unit = {
        // Local mode using all available cores; app name derived from the object name.
        val conf = new SparkConf().setAppName(this.getClass.getSimpleName)
                .setMaster("local[*]")

        // 5-second micro-batch interval.
        val ssc = new StreamingContext(conf, Seconds(5))

        // Log level (left at Spark's default; uncomment to reduce console noise).
        //ssc.sparkContext.setLogLevel("WARN")

        val topics = List("ssc-wc")

        val bootstrapServers = "192.168.62.131:9092,192.168.62.132:9092,192.168.62.133:9092"
//        val bootstrapServers = "hd1:9092,hd2:9092,hd3:9092"
//        val bootstrapServers = "localhost:9092"

        // Kafka consumer configuration.
        val kafkaParams = Map[String, String](
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers,
            ConsumerConfig.GROUP_ID_CONFIG -> "ssc-gpb1234",
            ConsumerConfig.MAX_POLL_RECORDS_CONFIG -> "100",
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "earliest",
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
            // Disable auto-commit (by default the client commits every 5s to the
            // internal __consumer_offsets topic). Offsets are committed manually
            // below, after the batch has been written to Redis. NOTE: because the
            // sink (hincrBy) is not idempotent, this gives at-least-once delivery,
            // not exactly-once — a batch retried after a sink write but before the
            // commit will be counted twice.
            ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "false",
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"
        )

        // Direct stream: one RDD partition per Kafka topic partition.
        val kafkaDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(ssc
            , LocationStrategies.PreferConsistent // distribute partitions evenly across executors
            , ConsumerStrategies.Subscribe[String, String](topics, kafkaParams))


        // Capture each batch's offset ranges. `transform` executes its body on
        // the driver once per batch, so updating this driver-side var is safe;
        // it is read in the same batch inside foreachRDD below.
        var offsetRanges: Array[OffsetRange] = null
        val transFormDStream: DStream[ConsumerRecord[String, String]] = kafkaDStream.transform((rdd: RDD[ConsumerRecord[String, String]]) => {
            offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
            rdd
        })

        // Per-batch word count; staying in DStream form keeps window /
        // updateStateByKey style operations available later.
        val reducedDStream: DStream[(String, Int)] = transFormDStream.map(_.value()).flatMap(_.split("\\s")).map((_, 1)).reduceByKey(_ + _)

        reducedDStream.foreachRDD((rdd: RDD[(String, Int)]) => {
            rdd.foreachPartition((tps: Iterator[(String, Int)]) => {
                // Obtain a Redis connection on the executor side.
                val jedis: Jedis = JedisConnectionPool.getConnection()
                try {
                    jedis.select(3)
                    // Fold this partition's word counts into a Redis hash.
                    for (tp <- tps) {
                        jedis.hincrBy("ssc_kafka_adv2", tp._1, tp._2)
                    }
                } finally {
                    // Always return the connection to the pool, even when a Redis
                    // command throws — otherwise the pool leaks connections.
                    jedis.close()
                }
            })
            // Commit offsets only after the batch's results reached Redis.
            kafkaDStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
        })

        ssc.start()

        ssc.awaitTermination()
    }

}
