package com.bigdata.spark.streaming

import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * @author : ranzlupup
 * @date : 2023/3/4 17:38
 */
object KafkaDirectAPI {

    /**
     * Entry point: consumes the "atcper" Kafka topic with Spark Streaming's
     * direct (receiver-less) API and prints a word count for every 3-second
     * batch until the job is stopped or fails.
     */
    def main(args: Array[String]): Unit = {
        // 1. Spark configuration. App name matches this object — the original
        //    said "ReceiverWordCount", a leftover from the receiver-based example.
        val sparkConf: SparkConf = new SparkConf().setAppName("KafkaDirectAPI").setMaster("local[*]")

        // 2. StreamingContext with a 3-second batch interval
        val ssc: StreamingContext = new StreamingContext(sparkConf, Seconds(3))

        // 3. Kafka consumer parameters. Use ConsumerConfig constants for every
        //    key (the original mixed constants with raw string keys).
        val kafkaPara: Map[String, Object] = Map[String, Object](
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop102:9092,hadoop103:9092,hadoop104:9092",
            ConsumerConfig.GROUP_ID_CONFIG -> "atcper",
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"
        )

        // 4. Direct stream: executors poll Kafka themselves (no receiver).
        //    PreferConsistent distributes partitions evenly across executors.
        val kafkaDataDS: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
            ssc,
            LocationStrategies.PreferConsistent,
            ConsumerStrategies.Subscribe[String, String](Set("atcper"), kafkaPara)
        )

        // 5. Keep only each record's value (the message payload)
        val valueDStream: DStream[String] = kafkaDataDS.map(_.value())

        // 6. Classic word count over each batch
        valueDStream
            .flatMap(_.split(" "))
            .map((_, 1))
            .reduceByKey(_ + _)
            .print()

        // 7. Start the streaming computation and block until termination
        ssc.start()
        ssc.awaitTermination()
    }
}
