package com.shujia.spark.stream

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe

object Demo9ReadKafka {

  /**
   * Streaming job that consumes vehicle records from the Kafka topic "car"
   * and maintains a running (stateful) count of traffic volume per city.
   *
   * Each Kafka record value is expected to be a comma-separated line whose
   * 7th field (index 6) is the city name. Per-city counts are carried across
   * micro-batches via `updateStateByKey`, which requires checkpointing.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    // NOTE(review): single-core local mode; the direct Kafka stream has no
    // long-running receiver so this works, but use local[*] for parallelism.
    conf.setMaster("local")
    conf.setAppName("kafka")
    val sc = new SparkContext(conf)

    // 5-second micro-batch interval.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Required by updateStateByKey: running state is persisted/recovered here.
    ssc.checkpoint("data/checkpoint")

    // Kafka consumer configuration.
    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "master:9092,node1:9092,node2:9092", // Kafka broker list
      "key.deserializer" -> classOf[StringDeserializer], // key deserializer class
      "value.deserializer" -> classOf[StringDeserializer], // value deserializer class
      "group.id" -> "use_a_separate_group_id_for_each_stream", // consumer group
      "auto.offset.reset" -> "earliest", // where to start when no committed offset exists
      // Fix: disable offset auto-commit. Progress is tracked through the
      // checkpoint; auto-committing could mark records consumed before they
      // are actually processed (per the Spark/Kafka 0.10 integration guide).
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Topic(s) to subscribe to.
    val topics: Array[String] = Array("car")

    // Create a direct (receiver-less) Kafka stream.
    val recordsDS: InputDStream[ConsumerRecord[String, String]] = KafkaUtils
      .createDirectStream[String, String](
        ssc,
        PreferConsistent,
        Subscribe[String, String](topics, kafkaParams)
      )

    // Keep only the record value (the raw CSV line).
    val carDS: DStream[String] = recordsDS.map(record => record.value())

    /**
     * Count traffic volume per city: emit (city, 1) for every record.
     * The city name is the 7th CSV field (index 6).
     */
    val kvDS: DStream[(String, Int)] = carDS.map(car => {
      val city: String = car.split(",")(6)
      (city, 1)
    })

    // Merge each batch's counts into the running per-city total.
    val cityFlowDS: DStream[(String, Int)] = kvDS
      .updateStateByKey((seq: Seq[Int], state: Option[Int]) => Some(seq.sum + state.getOrElse(0)))

    cityFlowDS.print()

    ssc.start()
    // Blocks until the context is stopped (externally or by an error).
    // Fix: removed the unreachable ssc.stop() that followed — awaitTermination
    // only returns once the context has already terminated.
    ssc.awaitTermination()
  }

}
