package com.sinux.generality.basesupport.utils.kafka.consumer;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import java.time.Duration;
import java.util.*;

/**
 * Thin convenience wrapper around {@link KafkaConsumer} that builds its
 * configuration from a broker list / group id (or a raw config map) and
 * exposes simple poll / manual-commit operations via {@code KafkaConsumerInterface}.
 *
 * <p>Not thread-safe: the underlying {@link KafkaConsumer} must only be used
 * from a single thread.
 *
 * @param <K> record key type
 * @param <V> record value type
 */
public class SimpleKafkaConsumerClass<K, V> implements KafkaConsumerInterface {

    // Broker address list; supply at least two entries so a single broker outage
    // does not prevent the initial connection.
    private List<String> bootstrapServers = null;

    // Consumer-group id; processes configured with the same group id belong to
    // the same consumer group.
    private String groupId;

    // Configuration handed to the KafkaConsumer constructor.
    private Properties properties = null;

    // Default poll timeout in milliseconds.
    private int pollTime = 1000;

    private String keyDeserializer = "com.sinux.generality.basesupport.utils.kafka.serialization.StringDeserializer";

    private String valueDeserializer = "com.sinux.generality.basesupport.utils.kafka.serialization.TransferObjectDeserializer";

    // Underlying Kafka consumer; created by buildKafkaConsumer().
    private Consumer<K, V> consumer = null;

    public List<String> getBootstrapServers() {
        return bootstrapServers;
    }

    public void setBootstrapServers(List<String> bootstrapServers) {
        this.bootstrapServers = bootstrapServers;
    }

    public String getGroupId() {
        return groupId;
    }

    public void setGroupId(String groupId) {
        this.groupId = groupId;
    }

    public SimpleKafkaConsumerClass() {
    }

    /**
     * Builds a consumer for the given brokers and consumer group.
     *
     * @param bootstrapServers broker addresses (host:port)
     * @param groupID          consumer-group id
     */
    public SimpleKafkaConsumerClass(List<String> bootstrapServers, String groupID) {
        this.bootstrapServers = bootstrapServers;
        this.groupId = groupID;
        buildProperties();
        buildKafkaConsumer();
    }

    /**
     * Builds a consumer from a raw configuration map, layered on top of the
     * defaults produced by {@link #buildProperties()}.
     *
     * @param configMap Kafka consumer configuration entries; values replace defaults
     */
    public SimpleKafkaConsumerClass(Map<String, Object> configMap) {
        if (this.properties == null) {
            buildProperties();
        }
        // put() already replaces an existing value, so no remove-before-put is needed.
        this.properties.putAll(configMap);
        buildKafkaConsumer();
    }

    /**
     * Copy constructor: rebuilds this consumer's properties from the source
     * wrapper's broker list / group id.
     *
     * @param dKPC source wrapper to copy configuration from
     */
    public SimpleKafkaConsumerClass(SimpleKafkaConsumerClass dKPC) {
        this.properties = dKPC.buildProperties();
        buildKafkaConsumer();
    }

    /**
     * (Re)creates {@link #properties} from the currently configured fields.
     *
     * @return the freshly built Properties instance
     */
    public Properties buildProperties() {

        properties = new Properties();

        if (bootstrapServers != null) {
            // Kafka documents bootstrap.servers as a comma-separated host:port list;
            // join explicitly rather than relying on LIST-type coercion of a raw List.
            properties.put("bootstrap.servers", String.join(",", bootstrapServers));
        }

        if (groupId != null) {
            properties.put("group.id", groupId);
        }

        properties.put("key.deserializer", keyDeserializer);

        properties.put("value.deserializer", valueDeserializer);

        return properties;
    }

    /**
     * Instantiates the underlying {@link KafkaConsumer} from {@link #properties}.
     *
     * @return the consumer, or {@code null} when no properties have been built yet
     */
    public Consumer<K, V> buildKafkaConsumer() {
        if (properties == null) {
            return null;
        }
        consumer = new KafkaConsumer<K, V>(properties);
        return consumer;
    }

    /**
     * Reads the auto-commit flag from the configuration.
     *
     * <p>Checks the current key {@code enable.auto.commit} first and falls back
     * to the legacy key {@code auto.commit.enable}. Kafka's default is {@code true}
     * when the key is absent. Values may be stored as Boolean or String, so both
     * representations are handled (the old code cast blindly to boolean, which
     * threw NPE for an absent key and ClassCastException for String values).
     */
    private boolean isAutoCommitEnabled() {
        Object value = properties.get("enable.auto.commit");
        if (value == null) {
            value = properties.get("auto.commit.enable");
        }
        if (value == null) {
            return true; // Kafka's documented default
        }
        if (value instanceof Boolean) {
            return (Boolean) value;
        }
        return Boolean.parseBoolean(value.toString());
    }

    /**
     * Shared implementation for the manual-commit poll variants: subscribes,
     * polls once, and commits synchronously when at least {@code minBatchSize}
     * records arrived in the batch.
     *
     * @return the polled records, or {@code null} when auto-commit is enabled
     *         (manual commit would be redundant then)
     */
    private ConsumerRecords<K, V> pollWithManualCommit(Collection topic, Duration timeout) {
        // Proceed only when auto-commit is OFF. The original guard was inverted:
        // it bailed out exactly when manual commit was the correct behavior.
        if (isAutoCommitEnabled()) {
            return null;
        }
        consumer.subscribe(topic);
        ConsumerRecords<K, V> records = consumer.poll(timeout);
        final int minBatchSize = 200;
        if (records.count() >= minBatchSize) {
            // operation to handle data
            consumer.commitSync();
        }
        return records;
    }

    /** Polls the given topics once using the default timeout. */
    @Override
    public ConsumerRecords<K, V> poll(Collection topic) {
        consumer.subscribe(topic); // subscribe to the given topics
        // poll(Duration) replaces the deprecated poll(long); no cast needed.
        return consumer.poll(Duration.ofMillis(pollTime));
    }

    /** Polls the given topics once; also updates the stored default timeout. */
    @Override
    public ConsumerRecords<K, V> poll(Collection topic, int pollTime) {
        this.pollTime = pollTime;
        consumer.subscribe(topic); // subscribe to the given topics
        return consumer.poll(Duration.ofMillis(pollTime));
    }

    /** Polls the given topics once with an explicit timeout. */
    @Override
    public ConsumerRecords<K, V> poll(Collection topic, Duration duration) {
        consumer.subscribe(topic); // subscribe to the given topics
        return consumer.poll(duration);
    }

    /**
     * Manual-commit poll with the default timeout.
     *
     * @return polled records, or {@code null} when auto-commit is enabled
     */
    @Override
    public ConsumerRecords<K, V> pollNotAutoCommitdefault(Collection topic) {
        return pollWithManualCommit(topic, Duration.ofMillis(pollTime));
    }

    /**
     * Manual-commit poll; also updates the stored default timeout.
     *
     * @return polled records, or {@code null} when auto-commit is enabled
     */
    @Override
    public ConsumerRecords<K, V> pollNotAutoCommitdefault(Collection topic, int pollTime) {
        this.pollTime = pollTime;
        return pollWithManualCommit(topic, Duration.ofMillis(pollTime));
    }

    /**
     * Manual-commit poll with an explicit timeout.
     *
     * @return polled records, or {@code null} when auto-commit is enabled
     */
    @Override
    public ConsumerRecords<K, V> pollNotAutoCommitdefault(Collection topic, Duration duration) {
        return pollWithManualCommit(topic, duration);
    }

    /**
     * Polls once and commits each partition's last consumed offset individually.
     *
     * @param duration poll timeout (the old code ignored it and blocked on
     *                 Long.MAX_VALUE indefinitely)
     */
    @Override
    public ConsumerRecords<K, V> pollNotAutoCommitPartitiondefault(Collection topic, Duration duration) {
        consumer.subscribe(topic);
        ConsumerRecords<K, V> records = consumer.poll(duration);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<K, V>> partitionRecords = records.records(partition);
            if (partitionRecords.isEmpty()) {
                continue; // defensive: avoid get(-1) on an empty partition list
            }
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            // The committed offset must be the offset of the NEXT record to read,
            // hence lastOffset + 1 (per the KafkaConsumer manual-commit contract).
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
        return records;
    }

    /** Subscribes to the topics and synchronously commits the current offsets. */
    @Override
    public void commitSync(Collection topic) {
        consumer.subscribe(topic);
        consumer.commitSync();
    }

    /** Subscribes to the topics and synchronously commits the given offset map. */
    @Override
    public void commitSync(Collection topic, Map singletonMap) {
        consumer.subscribe(topic);
        consumer.commitSync(singletonMap);
    }

    /** Closes the underlying consumer; safe to call when it was never built. */
    @Override
    public void close() {
        if (consumer != null) {
            consumer.close();
        }
    }

}
