package com.example.window.state

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, State, StateSpec, StreamingContext}

object AccumulateWordCount2 {

  /**
   * Stateful word count over a socket text stream using `mapWithState`.
   *
   * Reads whitespace-separated words from a TCP socket, keeps a running
   * per-word total across batches via Spark Streaming state, and writes
   * each batch's running totals as text files (one directory per batch).
   *
   * @param args optional CLI args: args(0) = host (default "localhost"),
   *             args(1) = port (default 9999). Defaults preserve the
   *             original hard-coded connection target.
   */
  def main(args: Array[String]): Unit = {
    // Backward-compatible generalization: allow host/port overrides from the
    // command line while keeping the original defaults.
    val host: String = if (args.length > 0) args(0) else "localhost"
    val port: Int = if (args.length > 1) args(1).toInt else 9999

    val conf: SparkConf = new SparkConf().setMaster("local[*]")
      .setAppName(this.getClass.getCanonicalName)
    val ssc = new StreamingContext(conf, Seconds(5))
    // mapWithState requires a checkpoint directory to persist state snapshots
    // between batches.
    ssc.checkpoint("data/checkpoint/")

    val lines: ReceiverInputDStream[String] =
      ssc.socketTextStream(host, port)
    val words: DStream[String] = lines.flatMap(_.split("\\s+"))
    val wordDstream: DStream[(String, Int)] = words.map(x => (x, 1))

    // The mapping function's return type becomes the element type of the
    // mapWithState result:
    // (KeyType, Option[ValueType], State[StateType]) => MappedType
    def mappingFunction(key: String, one: Option[Int], state: State[Int]): (String, Int) = {
      // Add this batch's count (None when the key is absent in the batch,
      // e.g. on state timeout) to the running total held in state.
      val sum: Int = one.getOrElse(0) + state.getOption.getOrElse(0)
      state.update(sum)
      (key, sum)
    }

    val spec = StateSpec.function(mappingFunction _)
    val resultDStream: DStream[(String, Int)] =
      wordDstream.mapWithState[Int, (String, Int)](spec)

    // Fix: the original called resultDStream.cache(), but the stream feeds
    // exactly one output action below, so caching it only wasted memory.
    //
    // Saving a DStream to text files creates one directory per batch with
    // many small files; repartition(1) keeps each batch to a single part file.
    val outputDir = "data/output2/"
    resultDStream.repartition(1).saveAsTextFiles(outputDir)

    ssc.start()
    ssc.awaitTermination()
  }

}
