package com.code.soulkafka.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

/**
 * @author jdy
 * @date 3:08 下午
 **/
@Slf4j
public class SimpleKafkaConsumerContainer implements Runnable {

    private volatile boolean running;

    private List<String> topic;

    private String handlerName;

    private Map<String, Object> kafkaConfig;

    private KafkaConsumer<String, String> consumer;

    private boolean seekToEnd;
    /**
     * 机房
     */
    private String idc;

    public SimpleKafkaConsumerContainer(List<String> topic, String handlerName,
                                        Map<String, Object> kafkaConfig,
                                        boolean seekToEnd) {
        this.topic = topic;
        this.handlerName = handlerName;
        this.kafkaConfig = kafkaConfig;
        this.consumer = new KafkaConsumer<>(kafkaConfig);
        this.seekToEnd = seekToEnd;
    }

    @Override
    public void run() {
        if (running) {
            return;
        }
        running = true;
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(kafkaConfig);
        try {
            subscribe(consumer);
            handleData(consumer);
        } finally {
            stop();
        }
    }

    private void handleData(KafkaConsumer<String, String> consumer) {
        while (isRunning()) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(10));
            //处理数据
            List<ConsumerRecord<String, String>> recordList = StreamSupport.stream(records.spliterator(), false)
                    .collect(Collectors.toList());
            if (recordList.size() > 0) {
                log.info("当前线程: {}, 分区: {}", Thread.currentThread().getName(), records.partitions());
            }
        }
    }

    private void subscribe(KafkaConsumer<String, String> consumer) {
        consumer.subscribe(topic, new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                log.info("onPartitionsRevoked is {}", partitions);
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                log.info("onPartitionsAssigned is {}", partitions);
                if (seekToEnd) {
                    consumer.seekToEnd(partitions);
                }
            }
        });
    }

    public boolean isRunning() {
        return running;
    }

    public boolean stop() {
        consumer.unsubscribe();
        consumer.close();
        return true;
    }

    public void clear() {
        running = false;
    }

}
