package com.sumbo.config.reader;

import com.sumbo.config.message.KafkaMessageOffset;
import com.sumbo.config.message.KafkaOffsetManager;
import com.sumbo.config.pipeline.PipelineDispatch;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * User: MeiZhongHao
 * Date: 2019-06-05
 * Time: 17:19
 * Description: Kafka input reader.
 */
public class KafkaReader extends Reader {

    private KafkaReader() {
        // Singleton: instantiate only through the static instance field.
    }

    /** Singleton instance; final so the reference cannot be replaced. */
    public static final KafkaReader instance = new KafkaReader();

    /** One consumer per configuration key; populated by {@link #init(Map)}. */
    private Map<String, KafkaConsumer> consumerMap = new HashMap<String, KafkaConsumer>();

    /** One worker thread per consumer; created in {@link #init(Map)}. */
    private ExecutorService executor;

    /** Cooperative stop flag read by every poll loop; volatile for cross-thread visibility. */
    private volatile boolean stopRead;

    /**
     * Creates one Kafka consumer per entry and subscribes it to the
     * comma-separated topic list found in the entry's {@code "topic"} property.
     *
     * @param temp map from consumer name to its Kafka {@link Properties};
     *             ignored when {@code null} or empty
     */
    public void init(Map<String, Properties> temp) {
        if (temp == null || temp.isEmpty()) {
            return;
        }
        for (Map.Entry<String, Properties> entry : temp.entrySet()) {
            Properties props = entry.getValue();
            // Fail fast with a clear message instead of an opaque NPE inside split().
            String topics = Objects.requireNonNull(
                    props.getProperty("topic"),
                    "missing 'topic' property for consumer: " + entry.getKey());
            KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
            consumer.subscribe(Arrays.asList(topics.split(",")));
            consumerMap.put(entry.getKey(), consumer);
        }
        executor = Executors.newFixedThreadPool(consumerMap.size());
    }

    /** Starts one background read loop per configured consumer. */
    @Override
    public void open() {
        for (String key : consumerMap.keySet()) {
            executor.submit(() -> read(key));
        }
    }

    /**
     * Poll loop for a single consumer: dispatches each record to its pipeline
     * and commits the offset only after successful processing, so failed
     * messages are re-delivered (at-least-once semantics).
     *
     * @param name the consumer key used during {@link #init(Map)}
     */
    public void read(String name) {
        KafkaConsumer consumer = consumerMap.get(name);
        while (!stopRead) {
            // NOTE(review): poll(long) is deprecated (removed in newer clients);
            // switch to poll(Duration) when the client library is upgraded.
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                KafkaMessageOffset offset = new KafkaMessageOffset(
                        name, record.offset(), record.topic(), record.partition());
                // Route the message through its pipeline; commit only on success.
                if (PipelineDispatch.dispatch(name, record.value())) {
                    KafkaOffsetManager.commit(offset);
                }
            }
        }
        // Flush whatever is still pending before this worker exits.
        commitRemaining();
    }

    private void commitRemaining() {
        // TODO commit all offsets that were processed but not yet committed.
    }

    /**
     * Stops all read loops, waits for the worker threads to finish, then
     * closes every consumer from this thread.
     */
    @Override
    public void close() {
        if (consumerMap.isEmpty()) {
            return;
        }
        stopRead = true; // each read loop exits after its current poll (<= 1s)
        if (executor != null) {
            executor.shutdown();
            try {
                // KafkaConsumer is not thread-safe: wait for the reader threads
                // to terminate before closing the consumers from this thread.
                executor.awaitTermination(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        for (String client : consumerMap.keySet()) {
            close(client);
        }
    }

    /**
     * Closes a single consumer. Call only after its read loop has stopped,
     * because {@link KafkaConsumer} is not safe for multi-threaded access.
     *
     * @param client the consumer key used during {@link #init(Map)}
     */
    public void close(String client) {
        KafkaConsumer consumer = consumerMap.get(client);
        if (consumer != null) {
            consumer.close();
        }
    }

    /** @return the live name-to-consumer map (not a defensive copy). */
    public Map<String, KafkaConsumer> getConsumerMap() {
        return consumerMap;
    }
}