package com.wp.spark

import java.util
import java.util.Properties
import scala.util.control._

import org.apache.kafka.clients.consumer.{ ConsumerRecords, KafkaConsumer}
import org.apache.kafka.common.{TopicPartition}


// Version with broker-assigned (automatic) partition assignment and manual offset control.

object MyKafkaConsumer {

  /**
   * Entry point: connects to the Kafka cluster, lets the broker assign
   * partitions for topic "chantree-all", rewinds every assigned partition to
   * offset 0, then reads up to 10 batches, committing offsets manually after
   * each batch. Stops early when a poll returns no records.
   */
  def main(args: Array[String]): Unit = {

    // Explicit Java<->Scala collection converters (.asScala / .asJava).
    // Replaces the deprecated implicit `collection.JavaConversions._`,
    // which silently converted collections and is removed in Scala 2.13+.
    import scala.collection.JavaConverters._

    val props: Properties = new Properties
    props.put("bootstrap.servers", "cdh141:9092,cdh142:9092,cdh143:9092,cdh144:9092,cdh145:9092")
    // Consumer group this consumer belongs to.
    props.put("group.id", "MyKafkaConsumer")
    // Manual offset commits: we only fetch part of the data, so auto-commit is disabled.
    props.put("enable.auto.commit", "false")
    props.put("session.timeout.ms", "60000")
    props.put("max.poll.interval.ms", "60000")
    // Deserialize both key and value as plain strings.
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")

    val consumer = new KafkaConsumer[String, String](props)
    try {
      // Subscribe, then poll once so the group coordinator assigns partitions
      // to this consumer before we inspect/seek the assignment.
      consumer.subscribe(List("chantree-all").asJava)
      consumer.poll(1000)

      val partitions: util.Set[TopicPartition] = consumer.assignment()
      // Manually rewind every assigned partition to the beginning.
      for (partition <- partitions.asScala) {
        consumer.seek(partition, 0L)
      }
      for (partition <- partitions.asScala) {
        val offset: Long = consumer.position(partition)
        println(s"partition=$partition ,offset=$offset")
      }

      val loop = new Breaks
      loop.breakable {
        for (_ <- 0 until 10) {
          val records: ConsumerRecords[String, String] = consumer.poll(2000)
          if (records.isEmpty) {
            println("未获取到数据")
            loop.break()
          }
          for (record <- records.asScala) {
            // BUG FIX: the original called println with four arguments, which
            // Scala auto-tuples, printing the raw tuple
            //   (offset = %d, key = %s, value = %s%n,123,k,v)
            // instead of a formatted line. printf applies the format string
            // as intended (%n = platform line separator).
            printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value())
          }
          // Synchronous commit of the offsets for the batch just processed.
          consumer.commitSync()
        }
      }
    } finally {
      // Always release the consumer's network resources, even if
      // poll/seek/commit throws (the original leaked on failure).
      consumer.close()
    }
  }
}
