package org.huangrui.spark.scala.streaming

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

/**
 * @Author hr
 * @Create 2024-10-21 17:43 
 */
object SparkStreaming03_Kafka {
  /**
   * Entry point: subscribes to the Kafka topic "test" via the direct stream API,
   * splits each record value on spaces, and prints a per-batch (3 s) word count.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming")
    val ssc: StreamingContext = new StreamingContext(sparkConf, Seconds(3))

    // Kafka consumer configuration. An immutable Map is sufficient:
    // ConsumerStrategies.Subscribe accepts any collection.Map[String, Object].
    val kafkaParams: Map[String, AnyRef] = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop120:9092,hadoop121:9092,hadoop122:9092",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.GROUP_ID_CONFIG -> "test",
      // Start from the log-end offset (LEO) when the group has no committed offset.
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "latest"
    )

    // Topic(s) to consume.
    val topics: Set[String] = Set("test")

    // Create the direct DStream from Kafka.
    // PreferConsistent distributes partitions evenly across available executors.
    // PreferBrokers (as originally written) is only correct when executors run
    // on the same hosts as the Kafka brokers — never the case with local[*].
    val directStream = KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    // Extract each record's value, then compute the word count for the batch.
    directStream
      .map((record: ConsumerRecord[String, String]) => record.value())
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .print()

    ssc.start()
    ssc.awaitTermination()
  }
}
