package SparkStreamingKafka

import Utils.JPools
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.codehaus.jackson.map.deser.std.StringDeserializer
/**
  * SparkStreaming与Kafka整合
  * 1：启动zkServer.start
  * 2：启动kafka-server-start.sh  /config/server.properties
  * 3：在Kafka集群中创建主题（或者通过写一个Producer创建主题，若当前主题没有，则自动创建）
  * 4：写一个Producer主题随机发送a-z的单词
  * 5：写一个Streaming从Kafka的主题消费数据
  * 6：对接收的数据进行切分做wordCount统计
  * 7：将算好的当前批次的wordCount存储到redis
  */
object WordCountKafka {

  /**
    * Entry point: consumes words from the Kafka topic "wordcount" in
    * 1-second micro-batches, computes a per-batch word count, and
    * accumulates the running totals into the Redis hash "wordcount".
    *
    * @param args optional; args(0) overrides the Kafka bootstrap servers
    *             (defaults to "localhost:9092")
    */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName("WordCountKafka")
      .setMaster("local[*]")
    // 1-second micro-batch interval.
    val ssc = new StreamingContext(sparkConf, Seconds(1))

    // Broker list is taken from the command line when provided; the original
    // hard-coded "" which can never connect to a broker.
    val bootstrapServers = args.headOption.getOrElse("localhost:9092")

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> bootstrapServers,
      // IMPORTANT: this must be Kafka's StringDeserializer. The file imports
      // org.codehaus.jackson.map.deser.std.StringDeserializer (a Jackson JSON
      // class) which does NOT implement
      // org.apache.kafka.common.serialization.Deserializer and makes the
      // consumer fail at startup, so the Kafka class is fully qualified here.
      "key.deserializer" -> classOf[org.apache.kafka.common.serialization.StringDeserializer],
      "value.deserializer" -> classOf[org.apache.kafka.common.serialization.StringDeserializer],
      "group.id" -> "text-consumer-group",
      "auto.offset.reset" -> "earliest",
      // Offsets are driven by Spark's direct stream; the consumer must not
      // auto-commit on its own.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("wordcount")

    // Receiver-less direct stream; PreferConsistent spreads partitions
    // evenly across the available executors.
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    stream.foreachRDD { rdd =>
      rdd.map(record => (record.value(), 1))
        .reduceByKey(_ + _) // word counts for the current batch only
        .foreachPartition { iter =>
          // One pooled Jedis connection per partition; hincrBy folds the
          // per-batch counts into the running totals kept in Redis.
          val jedis = JPools.getJedis
          try {
            iter.foreach { case (word, count) =>
              jedis.hincrBy("wordcount", word, count)
            }
          } finally {
            // Always return the connection to the pool, even if a write
            // throws — the original leaked it on failure.
            jedis.close()
          }
        }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
