package com.shujia.stream

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Durations, StreamingContext}

object Demo5ReadKafka {

  /**
   * Receiver-based Kafka -> Spark Streaming demo.
   *
   * Reads comma-separated records from Kafka topic "topic4" (via the legacy
   * ZooKeeper/receiver API), extracts the county id (4th field) from each
   * record, and maintains a running per-county record count across batches,
   * printing the current totals every 5-second batch.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName("stream")
      .setMaster("local[2]") // two local threads: one for the receiver, one for processing
      .set("spark.sql.shuffle.partitions", "4")
      // Enable the write-ahead log so receiver-ingested data survives driver
      // failure; the WAL requires a checkpoint directory (set below).
      .set("spark.streaming.receiver.writeAheadLog.enable", "true")

    val sc = new SparkContext(conf)

    // Streaming context with a 5-second batch interval.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Checkpointing is required both by the WAL and by updateStateByKey.
    ssc.checkpoint("data/checkpoint")

    // topic name -> number of receiver threads consuming it
    val topics = Map("topic4" -> 2)

    // Receiver-based Kafka DStream; each element is (message key, message value).
    val kafkaDS = KafkaUtils.createStream(
      ssc,
      "node1:2181,node2:2181,node3:2181", // ZooKeeper quorum
      "test",                             // consumer group id
      topics
    )
      // Keep only the message payload.
      .map(_._2)
      // Extract the county id (4th comma-separated field). `lift(3)` returns
      // None for malformed lines with fewer than 4 fields, so flatMap drops
      // them instead of an ArrayIndexOutOfBoundsException failing the batch.
      .flatMap(line => line.split(",").lift(3))
      .map(countyId => (countyId, 1))
      // Running total per county across all batches (stateful operator;
      // this is what makes the checkpoint directory mandatory).
      .updateStateByKey((counts: Seq[Int], state: Option[Int]) => Some(counts.sum + state.getOrElse(0)))

    kafkaDS.print()

    ssc.start()
    // Blocks until the context is stopped or terminates with an error, so no
    // explicit ssc.stop() is needed (the one that followed here was dead code).
    ssc.awaitTermination()
  }
}
