package com.atguigu.stream

import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Consumes messages from a Kafka topic via Spark Streaming's
 * receiver-based Kafka source and prints per-batch message counts.
 */
object TestSparkStreaming_KafkaSource {

  def main(args: Array[String]): Unit = {

    // Spark configuration; local[*] uses all available local cores.
    // Fix: appName now matches this object instead of the copy-pasted
    // "TestSparkStreaming_WordCount" from the word-count example.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("TestSparkStreaming_KafkaSource")

    // Streaming context with a 5-second micro-batch interval.
    // The class that creates the SparkContext acts as the Driver.
    val streamingContext = new StreamingContext(sparkConf, Seconds(5))

    // Kafka consumer parameters for the receiver-based stream.
    // NOTE(review): createStream is the old high-level (ZooKeeper-based)
    // consumer; decoding is driven by the StringDecoder type parameters
    // below, so the key/value deserializer entries here are presumably
    // ignored — verify against the spark-streaming-kafka version in use.
    val paramMap: Map[String, String] = Map(
      "group.id" -> "atguigu",
      "zookeeper.connect" -> "linux1:2181",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"
    )

    // topic name -> number of consumer threads for the receiver
    val topicMap: Map[String, Int] = Map("atguigu181111" -> 3)

    // Receiver-based DStream of (message key, message value) pairs.
    val kafkaDStream: ReceiverInputDStream[(String, String)] =
      KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
        streamingContext,
        paramMap,
        topicMap,
        StorageLevel.MEMORY_ONLY
      )

    // Map each record to (value, 1); the key is unused, so bind it as _.
    val wordToCountDStream: DStream[(String, Int)] =
      kafkaDStream.map { case (_, value) => (value, 1) }

    // Sum the counts per distinct message value within each batch.
    val wordToSumDStream: DStream[(String, Int)] = wordToCountDStream.reduceByKey(_ + _)

    // Print the first elements of each batch's result RDD to stdout.
    wordToSumDStream.print()

    // Start the receivers and the streaming computation...
    streamingContext.start()

    // ...and keep the Driver alive until the job is terminated.
    streamingContext.awaitTermination()
  }
}
