package com.shujia.streeam

import kafka.serializer.StringDecoder
import org.apache.spark.api.java.StorageLevels
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

/**
  * Stateful streaming word count over a Kafka topic fed by Flume.
  *
  * Consumes the "flume" topic via the legacy receiver-based Kafka API
  * (ZooKeeper-managed offsets), splits each message on commas, and keeps a
  * running per-word count with `updateStateByKey`, printing each batch.
  */
object Demo10KafkaOnFlumeWC {
  def main(args: Array[String]): Unit = {

    // appName matches the object name (was a copy-paste leftover "Demo8SSCOnKafka").
    val conf = new SparkConf().setMaster("local[4]").setAppName("Demo10KafkaOnFlumeWC")

    val sc = new SparkContext(conf)

    // 5-second micro-batches.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Checkpointing is required by updateStateByKey to persist state across batches.
    ssc.checkpoint("spark/data/flume")

    // Topic to consume -> number of receiver threads for that topic.
    val topics = Map("flume" -> 2)

    // ZooKeeper quorum used by the old high-level consumer for offset tracking.
    val zk = "node2:2181,node3:2181,node4:2181"
    val groupId = "asdasdas"

    // Kafka consumer parameters; "smallest" replays the topic from the earliest
    // available offset when this group has no committed offset yet.
    val kafkaParams = Map[String, String](
      "zookeeper.connect" -> zk,
      "group.id" -> groupId,
      "zookeeper.connection.timeout.ms" -> "10000",
      "auto.offset.reset" -> "smallest"
    )

    // Connect to Kafka and create the input DStream of (key, message) pairs.
    val linesDS = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, topics, StorageLevels.MEMORY_AND_DISK_SER)

    // Word count: take the message value, split on commas, and fold each
    // batch's per-word counts into the running total kept in state.
    linesDS
      .map(_._2)
      .flatMap(_.split(","))
      .map((_, 1))
      .updateStateByKey((seq: Seq[Int], stat: Option[Int]) => Option(seq.sum + stat.getOrElse(0)))
      .print()

    ssc.start()
    // Blocks until the context is stopped (e.g. externally or on error), so no
    // explicit ssc.stop() is needed afterwards — the old trailing call was dead code.
    ssc.awaitTermination()

  }
}
