package cluster

import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, State, StateSpec, StreamingContext}

/**
  * Created by kawhi on 31/05/2017.
  */
object Test {
  /**
    * Entry point for a stateful streaming word count.
    *
    * Reads a JSON config path from `args(0)`, builds a Kafka direct stream,
    * splits each message value on ';' into "timestamp port word" records,
    * and maintains a running count per word via `mapWithState`.
    * Blocks in `awaitTermination` until the context is stopped.
    */
  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      // Fixed: usage line previously referenced "HashJoin_state" (copy-paste
      // from another job) instead of this application's name.
      System.err.println("Usage: Test <stream.json>")
      System.exit(1)
    }
    // Read all job parameters from the JSON config file.
    // NOTE(review): ports_num, m, r, lgw and key_space are unused in this job;
    // they are bound only because getFromJson returns the full tuple.
    val (brokers, topics, batch_duration, ports_num, m, r, kafka_offset, path, lgw, key_space)
    = ClusterUtils.getFromJson(args(0))

    // Build the StreamingContext. A checkpoint directory is mandatory for
    // mapWithState, which stores its per-key state there.
    val conf = new SparkConf().setAppName("Test")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
    val ssc = new StreamingContext(conf, Seconds(batch_duration))
    ssc.checkpoint(path + "/state/checkpoint")

    // Kafka direct-stream configuration (old 0.8 consumer API).
    val kafkaParams = Map[String, String](
      "metadata.broker.list" -> brokers,
      "auto.offset.reset" -> kafka_offset
    )

    // "timestamp port word" => (word, 1): emit a count of one per record.
    // (The previous comment claimed (word, port) — the port at tmp(1) is
    // actually discarded; only the word at tmp(2) is kept.)
    val preProcessing = (str: String) => {
      val fields = str.split(" ")
      (fields(2), 1)
    }

    // Stateful update: add this batch's count (if any) to the stored total,
    // persist the new total, and emit (word, runningTotal) downstream.
    val mappingFunc = (word: String, one: Option[Int], state: State[Int]) => {
      val sum = one.getOrElse(0) + state.getOption.getOrElse(0)
      val output = (word, sum)
      state.update(sum)
      output
    }

    KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
      .flatMap(_._2.split(";")) // one Kafka value may carry several ';'-separated records
      .map(preProcessing)
      .mapWithState(StateSpec.function(mappingFunc))
      .foreachRDD((rdd, time) => {
        // The counts themselves are not materialized here; only the batch
        // time is logged. foreachRDD is still required to trigger execution.
        println(s"---- $time ----")
      })

    ssc.start()
    ssc.awaitTermination()
  }
}
