package cn.doitedu.kafka.demos;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.sql.Connection;
import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.time.Duration;
import java.util.*;

public class ConsumerDemo4手动提交消费位移 {

    /**
     * Demonstrates manual consumer-offset committing (auto-commit disabled).
     *
     * <p>For each poll() batch, records are processed first, then the per-partition
     * commit offsets (last consumed offset + 1) are committed asynchronously to
     * {@code __consumer_offsets}. If the async commit fails, the offsets are
     * persisted to a MySQL table as a fallback so they can be recovered later.
     *
     * <p>Runs forever; terminate the process to stop.
     */
    public static void main(String[] args) {

        Properties props = new Properties();
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "g003");
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "doit01:9092");
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // Disable automatic offset commits; offsets are committed manually below.
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        // Cap the total bytes a single poll() may fetch.
        props.setProperty(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "10240000");

        // try-with-resources closes the consumer on any abnormal exit
        // (the original leaked it).
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {

            consumer.subscribe(Arrays.asList("doit27-1"));

            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(Long.MAX_VALUE));
                if (records.isEmpty()) {
                    continue; // nothing fetched, nothing to process or commit
                }

                /*
                 * Fine-grained offset control: commit, per partition, the exact
                 * position reached in this batch rather than a consumer-wide
                 * commitSync() of everything.
                 *
                 * The map is keyed by TopicPartition directly so the topic name is
                 * taken from the record metadata instead of a hard-coded literal
                 * (the original rebuilt TopicPartition from the string "doit27-1").
                 */
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        // Business processing
                        System.out.println(record.key() + "," + record.value());
                    }
                    // Records within a partition arrive in offset order, so the last
                    // record holds the batch's max offset. The commit offset is
                    // max offset + 1: the position the NEXT read should start from.
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    offsets.put(partition, new OffsetAndMetadata(lastOffset + 1));
                }

                /*
                 * Manual commit to __consumer_offsets, AFTER processing completes
                 * (at-least-once semantics). commitSync(offsets) is the blocking
                 * alternative; the original called BOTH commitSync and commitAsync
                 * on the same map, committing every batch twice - the redundant
                 * synchronous commit is removed here.
                 */
                consumer.commitAsync(offsets, (committed, exception) -> {
                    if (exception != null) {
                        // Async commit failed: persist the offsets to MySQL so they
                        // can be recovered/replayed. Class.forName() is unnecessary:
                        // JDBC 4+ drivers self-register via the ServiceLoader SPI.
                        // try-with-resources closes both JDBC handles (the original
                        // leaked them).
                        try (Connection conn = DriverManager.getConnection(
                                     "jdbc:mysql://doit01:3306/abc", "root", "123456");
                             PreparedStatement pst = conn.prepareStatement(
                                     "insert into kfk_offset values (?,?,?)")) {

                            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : committed.entrySet()) {
                                pst.setString(1, entry.getKey().topic());      // topic
                                pst.setInt(2, entry.getKey().partition());     // partition
                                pst.setLong(3, entry.getValue().offset());     // commit offset
                                pst.execute();
                            }

                        } catch (Exception e) {
                            // Do not swallow (the original's catch block was empty):
                            // losing this error means silently losing the offsets.
                            System.err.println("Failed to persist offsets to MySQL after async commit failure");
                            e.printStackTrace();
                        }
                    }
                });
            }
        }
    }
}
