package cn._51doit.kafka.clients

import java.time.Duration
import java.util
import java.util.Properties

import org.apache.kafka.clients.consumer._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

/**
 * Demonstrates MANUAL offset management: auto-commit is disabled and the
 * consumer explicitly commits offsets to Kafka's internal __consumer_offsets
 * topic after each non-empty batch.
 *
 * An offset is keyed by (group.id, topic, partition) -> offset.
 */
object ConsumerCommitOffsetDemo3 {

  def main(args: Array[String]): Unit = {

    // 1. Consumer configuration
    val props = new Properties()
    // Brokers to bootstrap from
    props.setProperty("bootstrap.servers", "node-1.51doit.cn:9092,node-2.51doit.cn:9092,node-3.51doit.cn:9092")
    // Key/value deserializers — use classOf consistently instead of mixing a
    // hard-coded class-name string with classOf (the original did both)
    props.setProperty("key.deserializer", classOf[StringDeserializer].getName)
    props.setProperty("value.deserializer", classOf[StringDeserializer].getName)
    // Consumer group id
    props.setProperty("group.id", "g005")

    // Where to start when no committed offset exists for this group:
    //   earliest -> from the beginning (like --from-beginning)
    //   latest   -> only records produced after the consumer starts
    props.setProperty("auto.offset.reset", "earliest") // [latest, earliest, none]

    // Disable auto-commit: offsets are committed manually via commitSync below
    props.setProperty("enable.auto.commit", "false")

    // 2. Consumer instance
    val consumer: KafkaConsumer[String, String] = new KafkaConsumer[String, String](props)

    // 3. Subscribe — the API takes a java collection
    val topic: util.List[String] = java.util.Arrays.asList("test")
    consumer.subscribe(topic)

    import scala.collection.JavaConverters._

    try {
      while (true) {
        // 4. Pull a batch of records (blocks up to 2 seconds)
        val msgs: ConsumerRecords[String, String] = consumer.poll(Duration.ofMillis(2000))

        val consumerRecords: Iterable[ConsumerRecord[String, String]] = msgs.asScala
        consumerRecords.foreach(println)

        if (!msgs.isEmpty) {
          // ((topic, partition), offset) for every record in this batch, e.g.
          //   (("test", 1), 21), (("test", 1), 22), (("test", 2), 15), ...
          val offsets: Iterable[((String, Int), Long)] =
            consumerRecords.map(record => ((record.topic(), record.partition()), record.offset()))

          // Highest consumed offset per (topic, partition)
          val maxOffsetInPartition: Iterable[((String, Int), Long)] =
            offsets.groupBy(_._1).map { case (_, v) => v.maxBy(_._2) }

          // Build Map[TopicPartition, OffsetAndMetadata].
          // BUG FIX: commit offset + 1 — per the KafkaConsumer contract the
          // committed offset is the offset of the NEXT record the application
          // will read. Committing record.offset() itself (as the original did)
          // re-delivers the last record of each partition after a restart.
          // Also use the single-arg OffsetAndMetadata constructor instead of
          // passing a null metadata string.
          val offsetMap: Map[TopicPartition, OffsetAndMetadata] = maxOffsetInPartition.map {
            case ((t, p), offset) =>
              (new TopicPartition(t, p), new OffsetAndMetadata(offset + 1))
          }.toMap
          println(offsetMap.toBuffer)

          // Synchronously commit the batch's offsets
          consumer.commitSync(offsetMap.asJava)
        }
      }
    } finally {
      // BUG FIX: the original never closed the consumer (close() was commented
      // out and unreachable behind the infinite loop); ensure network/buffer
      // resources are released even if poll/commit throws.
      consumer.close()
    }
  }
}
