package com.yang.consumer.kafka.manual;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;

/**
 * Demonstrates manual offset management with {@link KafkaConsumer}.
 *
 * <p>Auto-commit is disabled in {@link #initConfig()}; each {@code demoN} method shows a
 * different commit strategy (whole-poll sync, batched sync, per-record sync, per-partition
 * sync, and async with a sync commit on shutdown). Only one demo runs per invocation of
 * {@link #main(String[])}.
 */
public class KafkaConsumerOffsetDemo {
    /** Comma-separated broker addresses of the Kafka cluster. */
    public static final String bootstrapServers = "192.168.137.31:9092,192.168.137.32:9092,192.168.137.33:9092";
    /** Topic the demos subscribe to. */
    public static final String topic = "topic-demo";
    /** Consumer group id shared by instances that balance partitions. */
    public static final String groupId = "consumer-demo";
    /** Client id for logs/metrics; defaults to consumer-1, consumer-2, ... if unset. */
    public static final String clientId = "consumer-demo-1";
    /** Poll-loop switch; flip to false to let the demos shut down cleanly. */
    public static final AtomicBoolean isRunning = new AtomicBoolean(true);

    /**
     * Builds the consumer configuration used by every demo.
     *
     * @return properties with String deserializers, the group/client ids above,
     *         and auto-commit disabled (offsets are committed manually)
     */
    public static Properties initConfig() {
        Properties properties = new Properties();
        // Kafka cluster broker addresses.
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Key deserializer.
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Value deserializer.
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Consumer group (group.id).
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        // Client id; if unset Kafka assigns consumer-1, consumer-2, ...
        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        // Disable auto-commit: offsets are committed manually in each demo.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        return properties;
    }

    public static void main(String[] args) {
        Properties properties = initConfig();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        // Subscribe (repeated calls replace earlier subscriptions; subscribe()
        // participates in automatic group rebalancing).
        consumer.subscribe(Collections.singletonList(topic));

        // Sync commit after each poll
        // demo1(consumer);
        // Sync commit after accumulating a batch
        // demo2(consumer);
        // Sync commit after every single record
        // demo3(consumer);
        // Consume and sync-commit per partition
        // demo4(consumer);
        // Async commit, with a final sync commit on shutdown
        demo5(consumer);
    }

    /**
     * Async-commit strategy: commit after each poll with {@code commitAsync}, then do a
     * final {@code commitSync} on shutdown so any in-flight async commits are not lost.
     */
    private static void demo5(KafkaConsumer<String, String> consumer) {
        try {
            while (isRunning.get()) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : consumerRecords) {
                    System.out.println(record.toString());
                }
                // Non-blocking commit; the callback reports per-partition results.
                consumer.commitAsync((offsets, e) -> {
                    if (e == null) {
                        offsets.forEach((partition, offsetAndMetadata) -> {
                            System.out.println(partition.toString());
                            System.out.println(offsetAndMetadata.toString());
                        });
                    } else {
                        System.out.println("提交失败");
                        e.printStackTrace();
                    }
                });
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                // On exit, commit synchronously to flush anything the async commits missed.
                consumer.commitSync();
            } finally {
                consumer.close();
            }
        }
    }

    /**
     * Per-partition strategy: process each partition's records, then sync-commit that
     * partition's last offset + 1 before moving to the next partition.
     */
    private static void demo4(KafkaConsumer<String, String> consumer) {
        try {
            while (isRunning.get()) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                // Consume partition by partition.
                for (TopicPartition partition : consumerRecords.partitions()) {
                    List<ConsumerRecord<String, String>> records = consumerRecords.records(partition);
                    // Defensive guard; partitions() normally only lists partitions with data.
                    if (records.isEmpty()) {
                        continue;
                    }
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.println(record.toString());
                    }

                    // Committed offset is the NEXT offset to read, hence last offset + 1.
                    long offset = records.get(records.size() - 1).offset();
                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(offset + 1)));
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }

    /**
     * Per-record strategy: sync-commit after every consumed record. Safest against
     * reprocessing but by far the slowest (one blocking commit per message).
     */
    private static void demo3(KafkaConsumer<String, String> consumer) {
        try {
            while (isRunning.get()) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : consumerRecords) {
                    System.out.println(record.toString());

                    TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
                    // Commit once per record: next offset = this record's offset + 1.
                    consumer.commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(record.offset() + 1)));
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }

    /**
     * Batched strategy: buffer records across polls and sync-commit only once the buffer
     * reaches {@code minBatchSize}, then clear the buffer.
     */
    private static void demo2(KafkaConsumer<String, String> consumer) {
        final int minBatchSize = 200;
        List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
        try {
            while (isRunning.get()) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : consumerRecords) {
                    System.out.println(record.toString());
                    buffer.add(record);
                }
                if (buffer.size() >= minBatchSize) {
                    // Commits the offsets of the last poll() for all assigned partitions.
                    consumer.commitSync();
                    buffer.clear();
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }

    /**
     * Simplest strategy: sync-commit once after processing each poll() batch.
     */
    private static void demo1(KafkaConsumer<String, String> consumer) {
        try {
            while (isRunning.get()) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : consumerRecords) {
                    System.out.println(record.toString());
                }
                consumer.commitSync();
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }
}
