package cn.oldsix.spark.streaming.spark

import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/*
* @Author : Wu.D.J
* @Create : 2018.09.16
**/
object SparkStreamingApplicationOfWordCount {

    /**
     * Entry point: recovers the StreamingContext from the checkpoint
     * directory if one exists, otherwise builds a fresh one via
     * [[createContext]], then runs until externally terminated.
     */
    def main(args: Array[String]): Unit = {
        val path = "/streaming/wc-path-2"
        // getOrCreate makes the job restartable: on restart the DStream graph
        // and Kafka offsets are rebuilt from the checkpoint data at `path`.
        val ssc = StreamingContext.getOrCreate(path, () => createContext(path))
        ssc.start()
        ssc.awaitTermination()
    }

    /**
     * Builds a StreamingContext that consumes the "wordcount" topic through
     * the Kafka direct API and maintains a running word count across batches.
     *
     * @param path checkpoint directory (required by updateStateByKey)
     * @return the fully wired, not-yet-started StreamingContext
     */
    def createContext(path: String): StreamingContext = {
        val sparkConf: SparkConf = new SparkConf().setAppName("streaming-wc-2").setMaster("local[*]")
        val ssc = new StreamingContext(sparkConf, Seconds(30))
        // Stateful transformations (updateStateByKey below) require a
        // checkpoint directory to persist state between batches.
        ssc.checkpoint(path)
        // auto.offset.reset
        //      -> smallest: start consuming from the earliest available offset
        //      -> largest : consume only messages arriving after startup
        val kafkaParams = Map(
            "metadata.broker.list" -> "oldsix:9092",
            "group.id" -> "wc-group-2",
            "auto.offset.reset" -> "largest")
        val topics = Set("wordcount")
        val dstream: InputDStream[(String, String)] =
            KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
        // Kafka records arrive as (key, value) pairs; only the value holds text.
        val topicData: DStream[String] = dstream.map(_._2)
        val wordAndOne: DStream[(String, Int)] = topicData.flatMap(_.split(" ")).map((_, 1))
        // reduceByKey aggregates within the current batch; updateStateByKey
        // then folds that per-batch count into the running total held in
        // checkpointed state. Immutable `values.sum` replaces the original
        // var + for-loop accumulator; Some is used since the sum is never null.
        val result: DStream[(String, Int)] = wordAndOne
            .reduceByKey(_ + _)
            .updateStateByKey((values: Seq[Int], state: Option[Int]) =>
                Some(state.getOrElse(0) + values.sum))
        result.print()
        ssc
    }
}
