package com.kgc.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Spark Streaming + Kafka integration example using the Receiver-based approach.
  *
  * Usage: KafkaWordCount &lt;zkQuorum&gt; &lt;group&gt; &lt;topics&gt; &lt;numThreads&gt;
  *   zkQuorum   - ZooKeeper connection string (e.g. "host1:2181,host2:2181")
  *   group      - Kafka consumer group id
  *   topics     - comma-separated list of topics to consume
  *   numThreads - number of receiver threads per topic
  *
  * Consumes text messages from Kafka in 2-second micro-batches and prints a
  * per-batch word count to stdout. Runs with a hard-coded local[2] master,
  * so it is intended for local demos rather than cluster deployment.
  */
object KafkaWordCount {
  // Explicit ": Unit =" replaces the deprecated procedure syntax
  // (removed in Scala 3) on the public entry point.
  def main(args: Array[String]): Unit = {
    if (args.length < 4) {
      System.err.println("Usage: KafkaWordCount <zkQuorum> <group> <topics> <numThreads>")
      System.exit(1)
    }

    val Array(zkQuorum, group, topics, numThreads) = args
    val sparkConf = new SparkConf().setAppName("KafkaWordCount").setMaster("local[2]")
    val ssc = new StreamingContext(sparkConf, Seconds(2))

    // Map each requested topic to the number of receiver threads consuming it.
    // NOTE(review): numThreads.toInt throws NumberFormatException on non-numeric
    // input; acceptable for a demo since it surfaces the bad argument immediately.
    val topicMap = topics.split(",").map((_, numThreads.toInt)).toMap

    // Create the Receiver-based input DStream; each record is a (key, value)
    // pair as read from Kafka via ZooKeeper.
    val messages = KafkaUtils.createStream(ssc, zkQuorum, group, topicMap)

    messages.map(_._2)             // keep only the message value, drop the key
      .flatMap(_.split(" "))       // tokenize each message on single spaces
      .map(word => (word, 1))      // pair every word with an initial count of 1
      .reduceByKey(_ + _)          // sum counts per word within the batch
      .print()                     // print the first 10 results of each batch

    ssc.start()
    ssc.awaitTermination()        // block until the streaming job is stopped
  }

}
