package com.mjf.spark.day10

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Consumes a Kafka topic through the receiver-based API and runs a
 * streaming word count over the message values.
 *
 * With this legacy API, consumer offsets are maintained in ZooKeeper
 * by default (no direct/offset-range management).
 */
object SparkStreaming04_ReceiverAPI {
  def main(args: Array[String]): Unit = {

    // Local-mode context with a 3-second batch interval.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming")
    val streamingContext: StreamingContext = new StreamingContext(sparkConf, Seconds(3))

    // Receiver-based Kafka stream: (ZooKeeper quorum, consumer group,
    // topic -> receiver thread count). Each element is a (key, value) pair.
    val messages: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(
      streamingContext,
      "hadoop102:2181,hadoop103:2181,hadoop104:2181",
      "bigdata",
      Map("bigdata-mjf" -> 2)
    )

    // Word count pipeline: keep only the value, split on spaces,
    // pair each word with 1, then sum counts per word within the batch.
    val wordCounts: DStream[(String, Int)] =
      messages
        .map { case (_, value) => value }
        .flatMap(_.split(" "))
        .map(word => (word, 1))
        .reduceByKey(_ + _)

    // Emit each batch's counts to stdout.
    wordCounts.print()

    // Launch the receivers, then block until the job is stopped externally.
    streamingContext.start()
    streamingContext.awaitTermination()

  }
}
