package com.ruyuan.ingestion.kafka.consumer;
import com.ruyuan.ingestion.IngestionExecutor;
import com.ruyuan.ingestion.common.Persistable;
import com.ruyuan.ingestion.config.Configuration;
import com.ruyuan.ingestion.utils.KafkaUtils;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public abstract class BaseConsumer implements IngestionExecutor {

    /** Kafka bootstrap servers; populated by {@link #initialize(Properties)}. */
    private String kafkaBrokerURL = null;

    /**
     * Destinations the consumed records are persisted to (there may be several).
     * Resolved lazily in {@link #initialize(Properties)} rather than in the
     * constructor: calling the abstract {@link #getWrites()} from the constructor
     * would invoke subclass code before the subclass is fully constructed.
     */
    private Persistable[] writes;

    /** @return the Kafka topic this consumer subscribes to */
    protected abstract String getKafkaTopic();

    /** @return whether Kafka should auto-commit offsets ({@code null} is treated as manual commit) */
    protected abstract Boolean getKafkaAutoCommit();

    /** @return the Kafka consumer group id */
    protected abstract String getKafkaConsumerGrp();

    /** @return the sinks every consumed batch is written to */
    protected abstract Persistable[] getWrites();

    /**
     * Maximum number of records returned by a single poll.
     * TODO: read this from the configuration file instead of hard-coding it.
     */
    protected int getMaxPollRecords() {
        return 2000;
    }

    /**
     * Value used for {@code max.poll.interval.ms}.
     * NOTE(review): 2000 ms is extremely aggressive — any gap between polls
     * longer than 2 s evicts the consumer from its group and forces a
     * rebalance. Confirm this is intentional; a production default is
     * typically in the minutes range. TODO: read from the configuration file.
     */
    protected int getMaxPollIntervalMillis() {
        return 2000;
    }

    /**
     * Stores the broker URL and initializes every configured sink.
     *
     * @param properties must contain {@link Configuration#BROKERS}
     */
    public void initialize(Properties properties) {
        this.kafkaBrokerURL = properties.getProperty(Configuration.BROKERS);
        // Resolve the sinks here (not in the constructor) so subclass state is
        // fully built before getWrites() runs.
        if (this.writes == null) {
            this.writes = getWrites();
        }
        for (Persistable write : this.writes) {
            write.initialize(properties);
        }
    }

    /**
     * The consume loop: polls the configured topic forever, writes each batch
     * to every sink, and commits offsets manually when auto-commit is off.
     *
     * @throws IllegalStateException if {@link #initialize(Properties)} was not
     *                               called first (broker URL missing)
     * @throws Exception             propagated from the sinks or the Kafka client
     */
    protected void consumer() throws Exception {
        if (this.kafkaBrokerURL == null || this.kafkaBrokerURL.isEmpty()) {
            throw new IllegalStateException("kafka broker url is not initialized");
        }

        // Read once; null-safe unboxing (null => manual commit).
        boolean autoCommit = Boolean.TRUE.equals(this.getKafkaAutoCommit());

        Properties commonProperties = KafkaUtils.getCommonProperties();
        commonProperties.put("bootstrap.servers", this.kafkaBrokerURL);
        commonProperties.put("group.id", this.getKafkaConsumerGrp());
        commonProperties.put("enable.auto.commit", Boolean.toString(autoCommit));
        commonProperties.put("auto.offset.reset", "earliest");
        commonProperties.put("max.poll.records", Integer.toString(this.getMaxPollRecords()));
        commonProperties.put("max.poll.interval.ms", Integer.toString(this.getMaxPollIntervalMillis()));
        commonProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        commonProperties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(commonProperties);
        try {
            // Inside the try so the consumer is closed even if subscribe throws.
            consumer.subscribe(Arrays.asList(getKafkaTopic()));

            long msgCount = 0;
            long processCount = 0;
            while (true) {
                // poll() already blocks up to the timeout on an empty topic, so
                // no extra sleep is needed. The previous Thread.sleep(2000)
                // between polls risked exceeding max.poll.interval.ms (2000 ms)
                // and getting this consumer evicted from the group while idle.
                ConsumerRecords<String, String> records = consumer.poll(3000);
                int recordsCount = (records != null) ? records.count() : 0;
                if (recordsCount <= 0) {
                    continue;
                }

                msgCount += recordsCount;

                // Fan the batch out to every destination.
                for (Persistable write : this.writes) {
                    processCount += write.write(records);
                }

                // Commit only after every sink has accepted the batch, so a
                // write failure causes re-consumption instead of silent loss.
                if (!autoCommit) {
                    consumer.commitAsync();
                }

                // TODO: replace with a real logger, e.g.
                // log.info("records read={} records processed={}", msgCount, processCount);
            }
        } finally {
            consumer.close();
        }
    }

    @Override
    public void execute(String[] args) {
        // Intentionally empty: concrete consumers drive consumption themselves.
    }
}
