package com.utils.kafka;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;

/**
 * @author xin.ding
 * @date 2019/12/7 15:19
 */
public class Consumer {
    private static final Logger LOGGER = LoggerFactory.getLogger(Consumer.class);

    /** Shared async-commit callback: logs failures, stays silent on success. */
    private static final OffsetCommitCallback LOGGING_COMMIT_CALLBACK = new OffsetCommitCallback() {
        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception e) {
            if (e != null) {
                LOGGER.error("commit failed for offset {}", offsets, e);
            }
        }
    };

    /** Static utility class; not meant to be instantiated. */
    private Consumer() {
    }

    /**
     * Consumes {@code topic} forever, logging each record at DEBUG level and
     * committing offsets asynchronously after each poll. If the loop is broken
     * by an exception, a final synchronous commit is attempted and the consumer
     * is closed before the exception propagates.
     *
     * @param topic topic to subscribe to
     */
    public static void consumer(String topic) {
        KafkaConsumer<String, String> consumer = getConsumer();
        consumer.subscribe(Collections.singletonList(topic));
        try {
            while (true) {
                // NOTE: poll(long) is deprecated in kafka-clients >= 2.0;
                // kept here for compatibility with the client version in use.
                ConsumerRecords<String, String> records = consumer.poll(300);
                for (ConsumerRecord<String, String> record : records) {
                    LOGGER.debug("partition = {} , offset = {}, key = {}, value = {}",
                            record.partition(), record.offset(), record.key(), record.value());
                }
                consumer.commitAsync(LOGGING_COMMIT_CALLBACK);
            }
        } finally {
            commitAndClose(consumer);
        }
    }

    /**
     * Consumes {@code topic}, logging each record value and committing the
     * tracked offsets asynchronously once every {@code num} records.
     *
     * @param topic topic to subscribe to
     * @param num   commit interval in records; must be positive
     * @throws IllegalArgumentException if {@code num <= 0} (would otherwise
     *                                  cause a division by zero in the loop)
     */
    public static void consumerBatch(String topic, int num) {
        if (num <= 0) {
            throw new IllegalArgumentException("num must be positive: " + num);
        }
        KafkaConsumer<String, String> consumer = getConsumer();
        Map<TopicPartition, OffsetAndMetadata> currentOffsets = new ConcurrentHashMap<>(16);
        long count = 0;
        consumer.subscribe(Collections.singletonList(topic));
        try {
            while (true) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(300);
                for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                    LOGGER.info("value = {}", consumerRecord.value());
                    // The committed offset is the NEXT offset to read, hence +1.
                    currentOffsets.put(new TopicPartition(consumerRecord.topic(), consumerRecord.partition()),
                            new OffsetAndMetadata(consumerRecord.offset() + 1, "no metadata"));
                    // Pre-increment so the first commit happens after num records;
                    // the original checked (count % num == 0) with count starting at 0,
                    // which committed immediately on the very first record.
                    if (++count % num == 0) {
                        consumer.commitAsync(currentOffsets, LOGGING_COMMIT_CALLBACK);
                    }
                }
            }
        } catch (Exception e) {
            LOGGER.error("consumerBatch failed for topic {}", topic, e);
        } finally {
            commitAndClose(consumer);
        }
    }

    /**
     * Consumes {@code topic}, seeking every assigned partition to offset
     * {@code line} on (re)assignment. Tracked offsets are committed
     * asynchronously after each poll, and synchronously when partitions are
     * revoked during a rebalance so processed records are not re-delivered.
     *
     * @param topic topic to subscribe to
     * @param line  offset to seek each newly assigned partition to
     */
    public static void consumerFromDetailPosition(String topic, final long line) {
        final KafkaConsumer<String, String> consumer = getConsumer();
        final Map<TopicPartition, OffsetAndMetadata> currentOffsets = new ConcurrentHashMap<>(16);
        try {
            consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // Flush processed offsets before ownership moves elsewhere.
                    consumer.commitSync(currentOffsets);
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    // Rewind every newly assigned partition to the requested offset.
                    for (TopicPartition partition : partitions) {
                        consumer.seek(partition, line);
                    }
                }
            });

            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(300);
                for (ConsumerRecord<String, String> record : records) {
                    LOGGER.info("partition = {} , offset = {}, key = {}, value = {}",
                            record.partition(), record.offset(), record.key(), record.value());
                    // The committed offset is the NEXT offset to read, hence +1.
                    currentOffsets.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1, "no metadata"));
                }
                consumer.commitAsync(currentOffsets, LOGGING_COMMIT_CALLBACK);
            }
        } catch (Exception e) {
            LOGGER.error("consumerFromDetailPosition failed for topic {}", topic, e);
        } finally {
            commitAndClose(consumer);
        }
    }

    /**
     * Attempts one final synchronous commit, then closes the consumer.
     *
     * <p>The previous implementation ({@code release}) registered a new JVM
     * shutdown hook on every call instead of closing: the consumer stayed open
     * for the life of the JVM and each invocation leaked another hook thread.
     * The commit failure is logged rather than rethrown so it cannot mask an
     * exception already propagating out of the consume loop.
     */
    private static void commitAndClose(KafkaConsumer<String, String> consumer) {
        try {
            consumer.commitSync();
        } catch (Exception e) {
            LOGGER.error("final synchronous commit failed", e);
        } finally {
            consumer.close();
        }
    }

    /**
     * Builds a new consumer instance configured for manual offset management
     * (auto-commit disabled) with String key/value deserialization.
     *
     * @return a freshly created {@link KafkaConsumer}
     */
    public static KafkaConsumer<String, String> getConsumer() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, Common.KAFKA_BOOTSTRAP_SERVER);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, Common.KAFKA_GROUP_ID);
        // Offsets are committed explicitly by the consume methods above.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Ignored while auto-commit is disabled; kept for easy toggling.
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return new KafkaConsumer<>(props);
    }
}