package cn.lgwen.candy.connector.kafka010.source;

import cn.lgwen.candy.runtime.core.Checkpoint;
import cn.lgwen.candy.runtime.operator.Collector;
import cn.lgwen.candy.runtime.operator.Fetcher;
import cn.lgwen.candy.runtime.operator.SourceOperator;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.util.*;
import java.util.stream.Collectors;

/**
 * 2019/12/25
 * aven.wu
 * danxieai258@163.com
 * kafka source
 */
public class KafkaSource010 extends SourceOperator<String> implements Checkpoint {

    // volatile: cancel() is expected to be invoked from a different thread than
    // the run() poll loop; without volatile the loop may never see the update.
    private volatile boolean isRunning = true;

    private Properties kafkaProperties;

    private KafkaConsumer<String, String> consumer;

    private List<String> topics;
    // Partition info tracked per topic; offsets updated as records are consumed.
    private Map<String, List<KafkaPartitionInfo>> kafkaPartitionInfos;

    // Offsets captured by the last snapshot(), committed later by commit().
    private List<KafkaPartitionInfo> perCommitKafkaPartitionInfos;

    /**
     * Creates a Kafka 0.10 source.
     *
     * @param kafkaProperties consumer configuration, passed straight to {@link KafkaConsumer}
     * @param topics          topics to subscribe to
     */
    public KafkaSource010(Properties kafkaProperties, String... topics) {
        this.kafkaProperties = kafkaProperties;
        consumer = new KafkaConsumer<>(kafkaProperties);
        perCommitKafkaPartitionInfos = new LinkedList<>();
        kafkaPartitionInfos = new HashMap<>();
        this.topics = Arrays.asList(topics);
    }

    /**
     * Poll loop: subscribes to the configured topics and pushes each record
     * value into the fetcher until {@link #cancel()} is called.
     *
     * <p>When checkpointing is enabled, consumed offsets are only recorded
     * locally here and committed via {@link #snapshot()} / {@link #commit()};
     * otherwise offsets are committed asynchronously after each poll.
     *
     * @param fetcher sink that receives every record value
     */
    @Override
    public void run(Fetcher<String> fetcher) {
        consumer.subscribe(topics);
        // Record the partition layout of every subscribed topic.
        for (String topic : topics) {
            List<KafkaPartitionInfo> partInfos = new ArrayList<>();
            for (PartitionInfo partition : consumer.partitionsFor(topic)) {
                partInfos.add(new KafkaPartitionInfo(partition));
            }
            kafkaPartitionInfos.put(topic, partInfos);
        }
        while (isRunning) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(100);
            if (consumerRecords.isEmpty()) continue;
            for (TopicPartition tp : consumer.assignment()) {
                // Drain the records belonging to this specific partition.
                List<ConsumerRecord<String, String>> partitionRecords = consumerRecords.records(tp);
                if (partitionRecords.isEmpty()) continue;
                for (ConsumerRecord<String, String> r : partitionRecords) {
                    fetcher.produce(r.value());
                }
                if (checkpoint) {
                    // NOTE(review): after a rebalance the assignment may contain a
                    // partition that is not yet in kafkaPartitionInfos; such an
                    // offset update is silently dropped (same as the original,
                    // which wrote it into a throwaway object) — TODO confirm
                    // whether new partitions should be registered here instead.
                    kafkaPartitionInfos.get(tp.topic()).stream()
                            .filter(part -> part.getPartition() == tp.partition())
                            .findAny()
                            .ifPresent(part -> part.setOffset(
                                    // Next offset to read = last consumed offset + 1.
                                    partitionRecords.get(partitionRecords.size() - 1).offset() + 1));
                }
            }
            if (!checkpoint) {
                consumer.commitAsync();
            }
        }
    }

    /** Stops the poll loop; takes effect once the in-flight poll() returns. */
    @Override
    public void cancel() {
        this.isRunning = false;
    }


    /**
     * Copies the currently tracked per-partition offsets so a later
     * {@link #commit()} commits exactly the state at this point in time.
     */
    @Override
    public void snapshot() {
        perCommitKafkaPartitionInfos.clear();
        kafkaPartitionInfos.forEach((topic, partitionInfo) ->
                partitionInfo.forEach(part -> perCommitKafkaPartitionInfos.add(
                        new KafkaPartitionInfo(
                                part.getTopic(),
                                part.getPartition(),
                                part.getOffset()
                        ))));
    }

    /**
     * Commits the offsets captured by the last {@link #snapshot()}.
     *
     * <p>All partitions are committed in one synchronous call instead of one
     * network round-trip per partition; partitions with no recorded progress
     * (offset {@code <= 0}) are skipped.
     */
    @Override
    public void commit() {
        // Topic+partition pairs are unique per snapshot, so toMap cannot collide.
        Map<TopicPartition, OffsetAndMetadata> offsets = perCommitKafkaPartitionInfos.stream()
                .filter(part -> part.getOffset() > 0)
                .collect(Collectors.toMap(
                        part -> new TopicPartition(part.getTopic(), part.getPartition()),
                        part -> new OffsetAndMetadata(part.getOffset())));
        if (!offsets.isEmpty()) {
            consumer.commitSync(offsets);
        }
    }
}
