package com.quantgroup.data.consumer;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;

/**
 * Kafka consumer client demo: manual offset commit and externally stored offsets.
 *
 * @author yangjinhua
 */
public class CustomConsumer {
    /**
     * Shared consumer configuration, populated by {@link #configure()}.
     */
    static Properties props = new Properties();

    /**
     * Populates {@link #props} with the consumer configuration and returns it.
     *
     * <p>Auto-commit is disabled because this demo commits offsets manually
     * ({@link #doConsume()}) or stores them externally ({@link #customStorage()}).
     *
     * @return the shared, fully populated {@link Properties} instance
     */
    static Properties configure() {
        // Kafka broker address
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.5.152:9092");
        // A consumer group id is mandatory when using subscribe()
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
        // Disable automatic offset commit; offsets are committed explicitly below
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Deserializers for the String keys/values used in this demo
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    public static void main(String[] args) {
        configure();

//        doConsume();
        customStorage();
    }

    /**
     * Demonstrates manual offset committing.
     *
     * <p>With {@code enable.auto.commit=false}, offsets must be committed
     * explicitly via {@code commitSync()} (blocks until the commit succeeds)
     * or {@code commitAsync()} (non-blocking, result delivered to a callback).
     */
    private static void doConsume() {
        // Create the consumer instance from the shared configuration
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the "first" topic
        consumer.subscribe(Collections.singletonList("first"));

        // Poll the broker in a loop
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("key:" + record.key() + ",value:" + record.value());
            }
            // Synchronous alternative — blocks the current thread until the
            // offsets are committed:
            // consumer.commitSync();

            // Asynchronous commit: does not block; failures are reported to the callback
            consumer.commitAsync((offsets, exception) -> {
                if (exception != null) {
                    System.err.println("Commit failed for " + offsets);
                }
            });
        }
    }

    /**
     * Offsets to store externally, keyed by partition. Each value is the offset
     * of the <em>next</em> record to consume (last processed offset + 1),
     * matching Kafka's committed-offset semantics.
     */
    private static final Map<TopicPartition, Long> CURRENT_OFFSET = new HashMap<>();

    /**
     * Demonstrates custom (external) offset storage with a rebalance listener:
     * offsets are flushed before partitions are revoked and the consumer seeks
     * to the stored position after partitions are assigned.
     */
    private static void customStorage() {
        // Create the consumer instance from the shared configuration
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe with a rebalance listener so offsets survive partition reassignment
        consumer.subscribe(Collections.singletonList("first"), new ConsumerRebalanceListener() {
            // Called BEFORE a rebalance: persist what we have consumed so far
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                commitOffset(CURRENT_OFFSET);
            }

            // Called AFTER a rebalance: resume each assigned partition from its stored offset
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                CURRENT_OFFSET.clear();
                for (TopicPartition partition : partitions) {
                    // Seek to the most recently stored offset and continue from there
                    consumer.seek(partition, getOffset(partition));
                }
            }
        });

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("key:" + record.key() + ",value:" + record.value());
                // Store offset + 1: the committed offset must point at the NEXT
                // record to read, otherwise the last record of each batch would
                // be reprocessed after a restart or rebalance.
                CURRENT_OFFSET.put(new TopicPartition(record.topic(), record.partition()),
                        record.offset() + 1);
            }
            // Persist offsets after processing the batch
            commitOffset(CURRENT_OFFSET);
        }
    }

    /**
     * Returns the stored offset for the given partition.
     *
     * <p>Placeholder implementation: always starts from the beginning.
     * TODO: read the offset from the external store (e.g. a database) written
     * by {@link #commitOffset(Map)}.
     *
     * @param partition the partition whose stored offset is requested
     * @return the offset of the next record to consume (currently always 0)
     */
    private static long getOffset(TopicPartition partition) {
        return 0;
    }

    /**
     * Persists the given offsets for all partitions owned by this consumer.
     *
     * <p>Placeholder implementation: does nothing. TODO: write the offsets to
     * an external store, ideally in the same transaction as the processed data
     * to achieve exactly-once semantics.
     *
     * @param currentOffset offsets to persist, keyed by partition
     */
    private static void commitOffset(Map<TopicPartition, Long> currentOffset) {

    }

}