package com.spark.util.example

import com.spark.util.core.{OffsetZk, SparkStreaming}
import com.spark.util.listener.SparkStreamingListener
import com.spark.util.utils.PropertiesUtil
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}

/**
 * Kafka direct-stream example that checkpoints consumer offsets to ZooKeeper.
 *
 * NOTE: transformations must not break the 1:1 mapping between RDD partitions
 * and Kafka partitions when offsets are read via [[HasOffsetRanges]].
 * Operators like map()/mapPartitions() are safe; anything that shuffles or
 * repartitions (reduceByKey()/join()/coalesce(), ...) is not.
 */
object StreamingExample extends SparkStreaming {

  def main(args: Array[String]): Unit = {

    enableLogEliminating() // suppress noisy framework logging
    enableTaskMonitorSupport() // enable task monitoring

    // Connection and topic settings come from the external properties file.
    val kafkaServer = PropertiesUtil.getString("kafka.bootstrap.server")
    val zkServer = PropertiesUtil.getString("kafka.zookeeper.server")
    val topic = PropertiesUtil.getString("kafka.test.topic").split(",")
    val group = PropertiesUtil.getString("kafka.test.group")
    val kafkaParams = getKafkaParams(kafkaServer, group)

    // Resume from the offsets previously committed to ZooKeeper (if any).
    val offsetZk = OffsetZk(zkServer)
    val offsets = offsetZk.getBeginOffset(topic, group)
    println(offsets)

    val ssc = setupSsc(None, 1)
    ssc.addStreamingListener(new SparkStreamingListener(ssc))
    val input = setupStream(ssc, topic, kafkaParams, offsets)

    // Read the offset ranges directly inside foreachRDD on the stream's own
    // RDD, as recommended by the Spark Kafka integration guide. This removes
    // the fragile driver-side `var` that was written in transform() and read
    // in foreachRDD(), and guarantees the HasOffsetRanges cast is applied to
    // the original KafkaRDD (any intermediate transformation loses that
    // interface and would throw a ClassCastException).
    input.foreachRDD { rdd =>
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      if (!rdd.isEmpty()) {
        // business logic
        rdd.map(_.value()).foreach(println(_))
        // persist end offsets only after the batch has been processed,
        // giving at-least-once semantics on restart
        offsetZk.saveEndOffset(offsetRanges, group)
      }
    }
    ssc.start()
    ssc.awaitTermination()
  }
}
