package com.sumbo.config.message;

import com.sumbo.config.reader.KafkaReader;
import com.sumbo.config.reader.ReaderConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;

import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Tracks consumer offsets pending commit, one manager per
 * (client, partition, topic) tuple, and periodically flushes them to Kafka
 * via {@code commitAsync} at the configured auto-commit interval.
 * <p>
 * User: MeiZhongHao — Date: 2019-06-06 — Time: 10:24
 */
public class KafkaOffsetManager {

    /**
     * Registry of managers keyed by {@link #topicPartitionKey}.
     * ConcurrentHashMap because {@link #commit} may be invoked from multiple
     * consumer threads concurrently.
     */
    public static Map<String, KafkaOffsetManager> ALL_PARTITION_OFFSET_MANAGER = new ConcurrentHashMap<>();

    /** Separator used when building the composite registry key. */
    private static final String SPLIT_CHAR = "#";
    /**
     * Consumer client name this manager belongs to.
     */
    private String client;
    /**
     * Kafka partition this manager tracks.
     */
    private int partition;
    /***
     * Kafka topic this manager tracks.
     */
    private String topic;

    /** Timestamp (ms) of the last commit attempt; used to throttle commits. */
    private long lastCommitTime;

    /**
     * Offsets awaiting commit, kept sorted so each partition's offsets are
     * committed in ascending order.
     */
    private volatile SortedMap<Long, KafkaMessageOffset> pendingToCommit = new TreeMap<Long, KafkaMessageOffset>();

    public KafkaOffsetManager(String client, int partition, String topic) {
        this.client = client;
        this.partition = partition;
        this.topic = topic;
    }

    /**
     * Records {@code offset} as pending commit under its partition's manager,
     * creating the manager on first use.
     */
    private static void pendToCommit(KafkaMessageOffset offset) {
        String key = topicPartitionKey(offset);
        // computeIfAbsent: single lookup, and no throwaway manager is allocated
        // when the key already exists (putIfAbsent + get always constructed one).
        KafkaOffsetManager manager = ALL_PARTITION_OFFSET_MANAGER.computeIfAbsent(key,
                k -> new KafkaOffsetManager(offset.getClient(), offset.getPartition(), offset.getTopic()));
        manager.getPendingToCommit().put(offset.getOffset(), offset);
    }

    /**
     * Queues {@code offset} for commit and, when the configured auto-commit
     * interval has elapsed, asynchronously commits the processed offsets of its
     * partition in order. Offsets not yet committable stay pending.
     */
    public static void commit(KafkaMessageOffset offset) {
        // pre-commit: record the offset as pending
        pendToCommit(offset);
        // try commit, throttled by the per-client auto-commit interval
        long curr = System.currentTimeMillis();
        KafkaOffsetManager offsetManager = ALL_PARTITION_OFFSET_MANAGER.get(topicPartitionKey(offset));
        if (curr - offsetManager.getLastCommitTime() < ReaderConfig.getAutoCommitIntervalMs(offset.getClient())) {
            return;
        }
        offsetManager.setLastCommitTime(curr);
        TopicPartition partition = new TopicPartition(offsetManager.getTopic(), offsetManager.getPartition());
        // Not every pending offset is necessarily committable. Work on a sorted
        // copy: each committable offset is removed from the copy, and whatever
        // remains becomes the new pending set (see TODO 2 below).
        SortedMap<Long, KafkaMessageOffset> toCommit = new TreeMap<>(offsetManager.getPendingToCommit());

        long lastProcessedOffset = 0L;
        boolean canCommit = false;
        for (Map.Entry<Long, KafkaMessageOffset> entry : offsetManager.getPendingToCommit().entrySet()) {
            // TODO 2: stop at the first offset that has not been successfully
            // processed (or has exhausted its retries). Placeholder condition —
            // it currently never breaks, so every pending offset is committed.
            if (false) {
                break;
            }
            lastProcessedOffset = entry.getKey();
            toCommit.remove(entry.getKey());
            canCommit = true;
        }
        // Replace the pending set with the leftovers. The old map is simply
        // discarded; clearing it first (as before) was redundant and could
        // race-wipe entries a concurrent reader still holds.
        offsetManager.setPendingToCommit(toCommit);
        Map<TopicPartition, OffsetAndMetadata> offsetPartition = new HashMap<>();
        if (canCommit) {
            // Kafka expects the NEXT offset to consume, hence +1.
            OffsetAndMetadata oam = new OffsetAndMetadata(lastProcessedOffset + 1);
            offsetPartition.put(partition, oam);
        }
        if (!offsetPartition.isEmpty()) {
            try {
                KafkaReader.instance.getConsumerMap().get(offset.getClient())
                        .commitAsync(offsetPartition, (offsets, ex) -> {
                            for (Map.Entry<TopicPartition, OffsetAndMetadata> committed : offsets.entrySet()) {
                                TopicPartition tp = committed.getKey();
                                System.out.println(Thread.currentThread().getName()
                                        + "--------------commit success----" + tp.topic()
                                        + "/" + tp.partition() + "/" + committed.getValue().offset());
                            }
                        });
            } catch (Exception e) {
                // NOTE(review): consider an SLF4J logger with the commit
                // exception attached; kept printStackTrace to avoid new deps.
                e.printStackTrace();
            }
        }
    }


    /**
     * Builds the registry key for the given message's (client, partition, topic).
     */
    public static String topicPartitionKey(KafkaMessageOffset kafkaMessage) {
        // Delegate so the key format lives in exactly one place.
        return topicPartitionKey(kafkaMessage.getClient(), kafkaMessage.getPartition(), kafkaMessage.getTopic());
    }

    /**
     * Builds the registry key {@code client#partition#topic}.
     */
    public static String topicPartitionKey(String client, int partition, String topic) {
        return client + SPLIT_CHAR + partition + SPLIT_CHAR + topic;
    }


    public String getClient() {
        return client;
    }

    public void setClient(String client) {
        this.client = client;
    }

    public int getPartition() {
        return partition;
    }

    public void setPartition(int partition) {
        this.partition = partition;
    }

    public String getTopic() {
        return topic;
    }

    public void setTopic(String topic) {
        this.topic = topic;
    }

    public long getLastCommitTime() {
        return lastCommitTime;
    }

    public void setLastCommitTime(long lastCommitTime) {
        this.lastCommitTime = lastCommitTime;
    }

    public SortedMap<Long, KafkaMessageOffset> getPendingToCommit() {
        return pendingToCommit;
    }

    public void setPendingToCommit(SortedMap<Long, KafkaMessageOffset> pendingToCommit) {
        this.pendingToCommit = pendingToCommit;
    }
}