package com.sinux.generality.basesupport.utils.kafka.consumer;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;

/**
 * Default {@link KafkaConsumerInterface} implementation wrapping an Apache Kafka
 * {@link Consumer}. Configuration is accumulated in bean-style fields, turned into
 * {@link Properties} by {@link #buildProperties()}, and used to create the consumer
 * via {@link #buildKafkaConsumer()}.
 *
 * <p>NOT thread-safe: the wrapped {@code KafkaConsumer} must be used from a single
 * thread only.
 *
 * @param <K> record key type
 * @param <V> record value type
 */
public class DefaultKafkaConsumerClass<K, V> implements KafkaConsumerInterface {

    // Minimum number of records fetched in a single poll before a manual commitSync is issued.
    private static final int MIN_BATCH_SIZE = 200;

    // Broker address list; configure at least two entries so a single broker outage is survivable.
    private List<String> bootstrapServers = null;

    // Consumer-group id; processes sharing the same group id form one consumer group.
    private String groupId;

    // Zookeeper connection string "host1:port1,host2:port2[/chroot/path]" (legacy consumers only).
    private String zookeeperConnect;

    // Consumer id; normally auto-generated, so usually left unset.
    private String consumerId = null;

    // Socket timeout for network requests; the real limit is max.fetch.wait + socket.timeout.ms.
    // NOTE(review): 30*100 = 3000 ms — possibly meant 30*1000; confirm before wiring this up.
    private int socketTimeoutMs = 30 * 100;

    // Socket receive buffer size in bytes.
    private int socketReceiveBufferBytes = 64 * 1024;

    // Max bytes fetched per partition per request; bounds consumer memory use and must be at
    // least the broker's maximum message size, or oversized messages can never be consumed.
    private int fetchMessageMaxBytes = 1024 * 1024;

    // Number of fetcher threads used to fetch data.
    private int numConsumerFetchers = 1;

    // When true, fetched offsets are committed automatically; committed offsets are picked up
    // by a replacement consumer if this process dies.
    private boolean enableAutoCommit = false;

    // Auto-commit interval.
    private int autoCommitIntervalMs = 1;

    // Max number of message chunks buffered for consumption; each chunk can be up to
    // fetch.message.max.bytes.
    private int queuedMaxMessageChunks = 2;

    // Max retries when a rebalance fails because group membership changed mid-rebalance.
    private int rebalanceMaxRetries = 4;

    // Minimum bytes the server should return per fetch; the request blocks until available.
    private int fetchMinBytes = 1;

    // Max time the server blocks a fetch when fetch.min.bytes is not yet satisfied.
    private int fetchWaitMaxMs = 100;

    // Backoff before retrying a rebalance.
    private int rebalanceBackoffMs = 2000;

    // Backoff before re-checking whether a partition leader has lost leadership.
    private int refreshLeaderBackoffMs = 200;

    // What to do when there is no initial offset: "earliest"/"latest"; anything else throws.
    private String autoOffsetReset = "earliest";

    // Throw a timeout exception if no message is available within this time (-1 = wait forever).
    private int consumerTimeoutMs = -1;

    // Whether messages from internal topics are hidden from the consumer.
    private boolean excludeInternalTopics = true;

    // Partition assignment strategy ("range" or "roundrobin").
    // NOTE(review): misspelled duplicate of partitionAssignmentStrategy; kept for API compatibility.
    private String parititionAssignmentStrategy = "range";

    // Caller-supplied id used to trace requests back to the issuing application.
    private String clientId;

    // Zookeeper session timeout; missing heartbeats for this long triggers a rebalance.
    private int zookeeperSessionTimeoutMs = 6000;

    // Max wait when establishing the zookeeper connection.
    private int zookeeperConnectionTimeoutMs = 6000;

    // Max time a ZK follower may lag behind the ZK leader.
    private int zookeeperSyncTimeMs = 2000;

    // Where offsets are stored: "zookeeper" or "kafka".
    private String offsetsStorage = "zookeeper";

    // Backoff when reconnecting the offsets channel or retrying failed offset fetch/commit requests.
    private int offsetChannelBackoffMs = 1000;

    // Socket timeout for offset fetch/commit responses (also used by ConsumerMetadata requests).
    private int offsetsChannelSocketTimeoutMs = 10000;

    // Retry count for offset commits issued during shutdown.
    private int offsetsCommitMaxRetries = 5;

    // With offsets.storage=kafka, also commit offsets to zookeeper. Required while migrating
    // from zookeeper-based to kafka-based offset storage; disable once migration is complete.
    private boolean dualCommitEnabled = true;

    // Partition assignment strategy: "range" or "roundrobin". Round-robin assigns partitions
    // cyclically across all consumer threads and yields a deterministic spread only when every
    // consumer instance has the same stream count and an identical topic subscription.
    private String partitionAssignmentStrategy = "range";

    // Configuration backing the consumer; (re)built by buildProperties().
    private Properties properties = null;

    // Default poll timeout in milliseconds.
    private int pollTime = 1000;

    private String keyDeserializer = "com.sinux.generality.basesupport.utils.kafka.consumer.StringDeserializer";

    private String valueDeserializer = "com.sinux.generality.basesupport.utils.kafka.consumer.TransferObjectDeserializer";

    // The wrapped KafkaConsumer instance; created by buildKafkaConsumer().
    private Consumer<K, V> consumer = null;

    public List<String> getBootstrapServers() {
        return bootstrapServers;
    }

    public void setBootstrapServers(List<String> bootstrapServers) {
        this.bootstrapServers = bootstrapServers;
    }

    public String getGroupId() {
        return groupId;
    }

    public void setGroupId(String groupId) {
        this.groupId = groupId;
    }

    public String getZookeeperConnect() {
        return zookeeperConnect;
    }

    public void setZookeeperConnect(String zookeeperConnect) {
        this.zookeeperConnect = zookeeperConnect;
    }

    public String getConsumerId() {
        return consumerId;
    }

    public void setConsumerId(String consumerId) {
        this.consumerId = consumerId;
    }

    public int getSocketTimeoutMs() {
        return socketTimeoutMs;
    }

    public void setSocketTimeoutMs(int socketTimeoutMs) {
        this.socketTimeoutMs = socketTimeoutMs;
    }

    public int getSocketReceiveBufferBytes() {
        return socketReceiveBufferBytes;
    }

    public void setSocketReceiveBufferBytes(int socketReceiveBufferBytes) {
        this.socketReceiveBufferBytes = socketReceiveBufferBytes;
    }

    public int getFetchMessageMaxBytes() {
        return fetchMessageMaxBytes;
    }

    public void setFetchMessageMaxBytes(int fetchMessageMaxBytes) {
        this.fetchMessageMaxBytes = fetchMessageMaxBytes;
    }

    public int getNumConsumerFetchers() {
        return numConsumerFetchers;
    }

    public void setNumConsumerFetchers(int numConsumerFetchers) {
        this.numConsumerFetchers = numConsumerFetchers;
    }

    public boolean isEnableAutoCommit() {
        return enableAutoCommit;
    }

    public void setEnableAutoCommit(boolean enableAutoCommit) {
        this.enableAutoCommit = enableAutoCommit;
    }

    public int getAutoCommitIntervalMs() {
        return autoCommitIntervalMs;
    }

    public void setAutoCommitIntervalMs(int autoCommitIntervalMs) {
        this.autoCommitIntervalMs = autoCommitIntervalMs;
    }

    public int getQueuedMaxMessageChunks() {
        return queuedMaxMessageChunks;
    }

    public void setQueuedMaxMessageChunks(int queuedMaxMessageChunks) {
        this.queuedMaxMessageChunks = queuedMaxMessageChunks;
    }

    public int getRebalanceMaxRetries() {
        return rebalanceMaxRetries;
    }

    public void setRebalanceMaxRetries(int rebalanceMaxRetries) {
        this.rebalanceMaxRetries = rebalanceMaxRetries;
    }

    public int getFetchMinBytes() {
        return fetchMinBytes;
    }

    public void setFetchMinBytes(int fetchMinBytes) {
        this.fetchMinBytes = fetchMinBytes;
    }

    public int getFetchWaitMaxMs() {
        return fetchWaitMaxMs;
    }

    public void setFetchWaitMaxMs(int fetchWaitMaxMs) {
        this.fetchWaitMaxMs = fetchWaitMaxMs;
    }

    public int getRebalanceBackoffMs() {
        return rebalanceBackoffMs;
    }

    public void setRebalanceBackoffMs(int rebalanceBackoffMs) {
        this.rebalanceBackoffMs = rebalanceBackoffMs;
    }

    public int getRefreshLeaderBackoffMs() {
        return refreshLeaderBackoffMs;
    }

    public void setRefreshLeaderBackoffMs(int refreshLeaderBackoffMs) {
        this.refreshLeaderBackoffMs = refreshLeaderBackoffMs;
    }

    public String getAutoOffsetReset() {
        return autoOffsetReset;
    }

    public void setAutoOffsetReset(String autoOffsetReset) {
        this.autoOffsetReset = autoOffsetReset;
    }

    public int getConsumerTimeoutMs() {
        return consumerTimeoutMs;
    }

    public void setConsumerTimeoutMs(int consumerTimeoutMs) {
        this.consumerTimeoutMs = consumerTimeoutMs;
    }

    public boolean isExcludeInternalTopics() {
        return excludeInternalTopics;
    }

    public void setExcludeInternalTopics(boolean excludeInternalTopics) {
        this.excludeInternalTopics = excludeInternalTopics;
    }

    public String getParititionAssignmentStrategy() {
        return parititionAssignmentStrategy;
    }

    public void setParititionAssignmentStrategy(String parititionAssignmentStrategy) {
        this.parititionAssignmentStrategy = parititionAssignmentStrategy;
    }

    public String getClientId() {
        return clientId;
    }

    public void setClientId(String clientId) {
        this.clientId = clientId;
    }

    public int getZookeeperSessionTimeoutMs() {
        return zookeeperSessionTimeoutMs;
    }

    public void setZookeeperSessionTimeoutMs(int zookeeperSessionTimeoutMs) {
        this.zookeeperSessionTimeoutMs = zookeeperSessionTimeoutMs;
    }

    public int getZookeeperConnectionTimeoutMs() {
        return zookeeperConnectionTimeoutMs;
    }

    public void setZookeeperConnectionTimeoutMs(int zookeeperConnectionTimeoutMs) {
        this.zookeeperConnectionTimeoutMs = zookeeperConnectionTimeoutMs;
    }

    public int getZookeeperSyncTimeMs() {
        return zookeeperSyncTimeMs;
    }

    public void setZookeeperSyncTimeMs(int zookeeperSyncTimeMs) {
        this.zookeeperSyncTimeMs = zookeeperSyncTimeMs;
    }

    public String getOffsetsStorage() {
        return offsetsStorage;
    }

    public void setOffsetsStorage(String offsetsStorage) {
        this.offsetsStorage = offsetsStorage;
    }

    public int getOffsetChannelBackoffMs() {
        return offsetChannelBackoffMs;
    }

    public void setOffsetChannelBackoffMs(int offsetChannelBackoffMs) {
        this.offsetChannelBackoffMs = offsetChannelBackoffMs;
    }

    public int getOffsetsChannelSocketTimeoutMs() {
        return offsetsChannelSocketTimeoutMs;
    }

    public void setOffsetsChannelSocketTimeoutMs(int offsetsChannelSocketTimeoutMs) {
        this.offsetsChannelSocketTimeoutMs = offsetsChannelSocketTimeoutMs;
    }

    public int getOffsetsCommitMaxRetries() {
        return offsetsCommitMaxRetries;
    }

    public void setOffsetsCommitMaxRetries(int offsetsCommitMaxRetries) {
        this.offsetsCommitMaxRetries = offsetsCommitMaxRetries;
    }

    public boolean isDualCommitEnabled() {
        return dualCommitEnabled;
    }

    public void setDualCommitEnabled(boolean dualCommitEnabled) {
        this.dualCommitEnabled = dualCommitEnabled;
    }

    public String getPartitionAssignmentStrategy() {
        return partitionAssignmentStrategy;
    }

    public void setPartitionAssignmentStrategy(String partitionAssignmentStrategy) {
        this.partitionAssignmentStrategy = partitionAssignmentStrategy;
    }

    public DefaultKafkaConsumerClass() {
    }

    /**
     * Creates a consumer connected to the given brokers as a member of the given group.
     *
     * @param bootstrapServers broker addresses (at least two recommended)
     * @param groupID          consumer-group id
     */
    public DefaultKafkaConsumerClass(List<String> bootstrapServers, String groupID) {
        this.bootstrapServers = bootstrapServers;
        this.groupId = groupID;
        buildProperties();
        buildKafkaConsumer();
    }

    /**
     * Builds the {@link Properties} used to construct the underlying KafkaConsumer
     * from the currently configured fields. Only the settings this wrapper actually
     * wires through are written; nullable settings are written only when non-null.
     *
     * @return the freshly built configuration (also retained in {@code this.properties})
     */
    public Properties buildProperties() {

        properties = new Properties();

        if (bootstrapServers != null) {
            properties.put("bootstrap.servers", bootstrapServers);
        }

        if (groupId != null) {
            properties.put("group.id", groupId);
        }

        if (zookeeperConnect != null) {
            properties.put("zookeeper.connect", zookeeperConnect);
        }

        if (consumerId != null) {
            properties.put("consumer.id", consumerId);
        }

        properties.put("enable.auto.commit", enableAutoCommit);

        if (autoOffsetReset != null) {
            properties.put("auto.offset.reset", autoOffsetReset);
        }

        properties.put("key.deserializer", keyDeserializer);

        properties.put("value.deserializer", valueDeserializer);

        return properties;
    }

    /**
     * Creates a consumer from the default properties overridden by {@code configMap}.
     *
     * @param configMap Kafka config-key to value overrides
     */
    public DefaultKafkaConsumerClass(Map<String, Object> configMap) {

        if (this.properties == null) {
            buildProperties();
        }
        // Properties.put overwrites existing keys, so no remove-then-put dance is needed.
        configMap.forEach(this.properties::put);
        buildKafkaConsumer();
    }

    /**
     * Copy-style constructor: rebuilds properties from another instance's configuration.
     */
    public DefaultKafkaConsumerClass(DefaultKafkaConsumerClass dKPC) {
        this.properties = dKPC.buildProperties();
        buildKafkaConsumer();
    }

    /**
     * Instantiates the wrapped KafkaConsumer from {@link #properties}.
     *
     * @return the new consumer, or {@code null} if no properties have been built yet
     */
    public Consumer<K, V> buildKafkaConsumer() {
        if (properties == null) {
            return null;
        }
        consumer = new KafkaConsumer<>(properties);
        return consumer;
    }

    /**
     * Subscribes to {@code topic} and polls once with the default timeout.
     */
    @Override
    public ConsumerRecords<K, V> poll(Collection topic) {
        consumer.subscribe(topic);
        // poll(long) is deprecated; the Duration overload needs no cast either.
        return consumer.poll(Duration.ofMillis(pollTime));
    }

    /**
     * Subscribes to {@code topic} and polls once; {@code pollTime} (ms) also becomes
     * the new default timeout for subsequent polls.
     */
    @Override
    public ConsumerRecords<K, V> poll(Collection topic, int pollTime) {
        this.pollTime = pollTime;
        consumer.subscribe(topic);
        return consumer.poll(Duration.ofMillis(pollTime));
    }

    /**
     * Subscribes to {@code topic} and polls once, blocking for at most {@code duration}.
     */
    @Override
    public ConsumerRecords<K, V> poll(Collection topic, Duration duration) {
        consumer.subscribe(topic);
        return consumer.poll(duration);
    }

    /**
     * Manual-commit poll with the default timeout: commits synchronously once at
     * least {@value #MIN_BATCH_SIZE} records were fetched in a single poll.
     *
     * @return the fetched records, or {@code null} when auto-commit is enabled
     *         (manual batch commits would be redundant in that mode)
     */
    @Override
    public ConsumerRecords<K, V> pollNotAutoCommitdefault(Collection topic) {

        // BUG FIX: the guard was inverted — it returned null whenever auto-commit was
        // DISABLED, making the manual-commit path unreachable. Bail out only when it is ON.
        if ((boolean) properties.get("enable.auto.commit")) {
            return null;
        }
        consumer.subscribe(topic);
        ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(pollTime));
        commitIfBatchReached(records);
        return records;
    }

    /**
     * Manual-commit poll; {@code pollTime} (ms) also becomes the new default timeout.
     *
     * @return the fetched records, or {@code null} when auto-commit is enabled
     */
    @Override
    public ConsumerRecords<K, V> pollNotAutoCommitdefault(Collection topic, int pollTime) {
        // BUG FIX: inverted guard (see pollNotAutoCommitdefault(Collection)).
        if ((boolean) properties.get("enable.auto.commit")) {
            return null;
        }
        consumer.subscribe(topic);
        this.pollTime = pollTime;
        ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(pollTime));
        commitIfBatchReached(records);
        return records;
    }

    /**
     * Manual-commit poll blocking for at most {@code duration}.
     *
     * @return the fetched records, or {@code null} when auto-commit is enabled
     */
    @Override
    public ConsumerRecords<K, V> pollNotAutoCommitdefault(Collection topic, Duration duration) {
        // BUG FIX: inverted guard (see pollNotAutoCommitdefault(Collection)).
        if ((boolean) properties.get("enable.auto.commit")) {
            return null;
        }
        consumer.subscribe(topic);
        ConsumerRecords<K, V> records = consumer.poll(duration);
        commitIfBatchReached(records);
        return records;
    }

    // Commits synchronously when a single poll returned at least MIN_BATCH_SIZE records.
    private void commitIfBatchReached(ConsumerRecords<K, V> records) {
        if (records.count() >= MIN_BATCH_SIZE) {
            // operation to handle data
            consumer.commitSync();
        }
    }

    /**
     * Polls once and commits each partition's last fetched offset individually.
     *
     * @param duration maximum time to block in the poll
     */
    @Override
    public ConsumerRecords<K, V> pollNotAutoCommitPartitiondefault(Collection topic, Duration duration) {
        consumer.subscribe(topic);
        // BUG FIX: previously polled with Long.MAX_VALUE (blocking indefinitely),
        // silently ignoring the caller's duration.
        ConsumerRecords<K, V> records = consumer.poll(duration);
        for (TopicPartition partition : records.partitions()) {
            // records.partitions() only lists partitions that returned data, so the
            // per-partition list is non-empty here.
            List<ConsumerRecord<K, V>> partitionRecords = records.records(partition);
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            // Commit the offset AFTER the last consumed record (hence +1).
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
        return records;
    }


    /** Subscribes to {@code topic} and synchronously commits all offsets from the last poll. */
    @Override
    public void commitSync(Collection topic) {
        consumer.subscribe(topic);
        consumer.commitSync();
    }

    /** Subscribes to {@code topic} and synchronously commits the given partition-to-offset map. */
    @Override
    public void commitSync(Collection topic, Map singletonMap) {
        consumer.subscribe(topic);
        consumer.commitSync(singletonMap);
    }

    /** Closes the wrapped consumer, committing offsets as configured. */
    @Override
    public void close() {
        consumer.close();
    }

}
