package com.fwmagic.spark.streaming

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

/**
  * Spark Streaming integration with Kafka.
  * 1. Add the Kafka integration dependency.
  * 2. Connect directly to Kafka to create the DStream (the direct approach
  *    consumes via the low-level API, which is more efficient).
  * Direct approach: the number of RDD partitions corresponds one-to-one
  * with the number of Kafka partitions.
  */
object KafkaStreaming {
    /**
      * Entry point: consumes lines from the Kafka topic "ssc-wc" in 5-second
      * micro-batches, splits them into words, and accumulates per-word counts
      * into the Redis hash "streaming-wc".
      *
      * @param args unused command-line arguments
      */
    def main(args: Array[String]): Unit = {
        // Run locally using as many worker threads as logical cores.
        val conf = new SparkConf().setAppName(this.getClass.getSimpleName)
                .setMaster("local[*]")

        // Micro-batch interval: one RDD every 5 seconds.
        val ssc = new StreamingContext(conf, Seconds(5))

        // Reduce log noise if desired.
        //ssc.sparkContext.setLogLevel("WARN")

        val topics = List("ssc-wc")

        val bootstrapServers = "192.168.62.131:9092,192.168.62.132:9092,192.168.62.133:9092"
//        val bootstrapServers = "hd1:9092,hd2:9092,hd3:9092"
//        val bootstrapServers = "localhost:9092"

        // Kafka consumer configuration.
        val kafkaParams = Map[String, String](
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers,
            ConsumerConfig.GROUP_ID_CONFIG -> "xssc-gpa",
            ConsumerConfig.MAX_POLL_RECORDS_CONFIG -> "100",
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "earliest",
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
            // Default is true: offsets are auto-committed every 5s to the
            // internal __consumer_offsets topic. Set to "false" and manage
            // offsets manually to achieve exactly-once semantics.
            //ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "false",
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"
        )

        // Direct stream: RDD partitions map one-to-one onto Kafka partitions.
        val kafkaDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(ssc
            , LocationStrategies.PreferConsistent // schedule tasks near the Kafka brokers when possible
            , ConsumerStrategies.Subscribe[String, String](topics, kafkaParams))

        // Extract the message payloads.
        val lines: DStream[String] = kafkaDStream.map(record => record.value())

        // Tokenize each line. "\\s+" (was "\\s") collapses runs of whitespace,
        // and the nonEmpty filter drops the empty leading token split() can
        // produce — otherwise "" would be counted as a word in Redis.
        val wordAndOne: DStream[(String, Int)] = lines.flatMap(_.split("\\s+")).filter(_.nonEmpty).map((_, 1))

        wordAndOne.foreachRDD((rdd: RDD[(String, Int)]) => {
            // Pre-aggregate per word on the executors so only the reduced
            // counts (not every raw pair) are shipped to the driver.
            val counts: Array[(String, Int)] = rdd.reduceByKey(_ + _).collect()
            // Sink to Redis from the driver.
            val jedis = new Jedis("192.168.62.131", 6379)
            try {
                // hincrBy: creates the hash field if absent, otherwise
                // atomically adds to the existing value.
                counts.foreach { case (word, count) =>
                    jedis.hincrBy("streaming-wc", word, count)
                }
            } finally {
                // Always release the connection, even if a Redis call fails
                // (the original leaked it on error).
                jedis.close()
            }
        })

        ssc.start()

        ssc.awaitTermination()
    }

}
