package com.study.spark.scala.streaming

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, KafkaUtils}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe

/**
  * Streaming word count over messages consumed directly from Kafka
  * (direct stream API, spark-streaming-kafka-0-10).
  *
  * @author stephen
  * @create 2019-03-19 17:35
  * @since 1.0.0
  */
object KafkaDirectWordCount {

  /**
    * Entry point. Optionally accepts three arguments:
    * {{{ KafkaDirectWordCount <bootstrapServers> <groupId> <topic1,topic2,...> }}}
    * When no (or incomplete) arguments are given, the original hard-coded
    * local defaults are used, so existing zero-argument invocations keep working.
    */
  def main(args: Array[String]): Unit = {

    // Generalized configuration: take broker list, group id and topics from the
    // command line when provided, otherwise fall back to the local defaults.
    val (bootstrapServers, groupId, topics) =
      if (args.length == 3) (args(0), args(1), args(2).split(",").map(_.trim))
      else ("localhost:9092", "g_test", Array("test", "topic2"))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> bootstrapServers,             // Kafka cluster address
      "key.deserializer" -> classOf[StringDeserializer],   // key deserializer
      "value.deserializer" -> classOf[StringDeserializer], // value deserializer
      "group.id" -> groupId,                               // consumer group id
      // Start from the latest offset only when the group has no committed offset yet.
      "auto.offset.reset" -> "latest",
      // Auto-commit is disabled on purpose: offsets are committed manually below,
      // AFTER each batch is processed, giving at-least-once semantics.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val conf: SparkConf = new SparkConf().setMaster("local[3]").setAppName("KafkaDirectWordCount")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(10))

    /**
      * Location strategies:
      *   PreferConsistent  -> spread partitions evenly across available executors
      *   PreferBrokers     -> only usable when executors run on the Kafka brokers
      *   PreferFixed       -> explicit mapping, for heavily skewed partition load
      * Consumer strategies:
      *   Subscribe         -> consume a fixed collection of topics (used here)
      *   SubscribePattern  -> consume topics matching a regex
      *   Assign            -> consume a fixed set of topic-partitions
      * ConsumerRecord holds topic, partition, offset and the message key/value.
      */
    val kafkaStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams))

    // Process each micro-batch and commit the consumed offsets only after the
    // batch has been processed successfully. The original code never committed
    // offsets at all (auto-commit off, no manual commit), so progress was lost
    // on every restart.
    kafkaStream.foreachRDD { rdd =>
      // Offset ranges must be captured on the ORIGINAL KafkaRDD, before any
      // transformation — only it implements HasOffsetRanges.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Word count over the record values; keys are irrelevant here, so the
      // intermediate (key, value) pairing from the original code is dropped.
      rdd.flatMap(_.value().split(" "))
        .map((_, 1))
        .reduceByKey(_ + _)
        .collect()
        .foreach(println)

      // Asynchronously store the processed offsets back in Kafka for this group.
      kafkaStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
