import java.net.InetAddress

import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import org.apache.spark.{SparkConf, TaskContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

import scala.collection.JavaConverters._
/**
  * Spark Streaming job that consumes Kafka topics via the 0.10 direct stream
  * and commits offsets manually with commitAsync after each micro-batch,
  * giving at-least-once processing semantics.
  *
  * Created by bymian on 2018/8/19.
  */
object CommitAsync {

  def main(args: Array[String]): Unit = {

    val conf = ConfigFactory.load()

    // Deserializers must match the stream's declared element type. The stream
    // below is ConsumerRecord[String, String]; ByteArrayDeserializer here
    // would compile (generics are erased) but throw ClassCastException the
    // moment key()/value() are read, so use StringDeserializer.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> conf.getString("kafka.brokers")
      , "key.deserializer" -> classOf[StringDeserializer]
      , "value.deserializer" -> classOf[StringDeserializer]
      , "group.id" -> conf.getString("kafka.groupid")
      , "receive.buffer.bytes" -> (65536: java.lang.Integer)
      , "auto.offset.reset" -> "latest"
      // Offsets are committed manually via commitAsync below; the consumer's
      // default auto-commit would race with that and must be disabled.
      , "enable.auto.commit" -> (false: java.lang.Boolean)
    ).asJava

    // Load the (comma-separated) topic list from the config file.
    val topics = conf.getString("kafka.topics").split(",").toList.asJava

    val sparkConf = new SparkConf().setMaster("local[*]").setAppName(s"${this.getClass.getSimpleName}")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    // Create the direct Kafka stream.
    val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    stream.foreachRDD { rdd =>
      // Offset ranges must be grabbed on the driver, from the untransformed
      // KafkaRDD, before any shuffle/repartition breaks the 1:1 mapping
      // between RDD partitions and Kafka partitions.
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      rdd.mapPartitions { iter =>
        // For a direct stream, the OffsetRange index equals the partition id.
        val osr = offsetRanges(TaskContext.getPartitionId())
        val host = InetAddress.getLocalHost
        // size consumes the iterator; nothing else reads it in this partition.
        val count = iter.size
        Iterator(s"host ${host} topic ${osr.topic} partition ${osr.partition} messageCount ${count}")
      }.collect().foreach(println)

      offsetRanges.foreach(println)

      // Commit only after this batch's output work has finished, so a crash
      // before this point replays the batch (at-least-once).
      stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
