package com.event.ingestion.kafka;

import com.event.ingestion.IngestionExecutor;
import com.event.ingestion.common.Persistable;
import com.event.ingestion.config.LoadConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.*;

/**
 * Common base class for Kafka consumers in the ingestion pipeline.
 *
 * @author ZzHh
 * @since 2020/06/18
 **/

/**
 * Base Kafka consumer: connects to a broker, polls records from a fixed
 * three-partition topic, and hands every non-empty batch to the configured
 * writers. Subclasses supply the topic name, consumer group, and commit mode.
 */
public abstract class BaseConsumer implements IngestionExecutor {
    // Kafka broker URL, loaded from configuration in initialize().
    private String kafkaBrokerUrl = null;

    // Writers that persist each polled batch; may be null or empty.
    private Persistable[] writers = null;

    /** Topic this consumer reads from. */
    protected abstract String getKafkaTopic();

    /**
     * Whether Kafka auto-commits offsets. When false, offsets are committed
     * manually (synchronously) after the writers have persisted a batch.
     * NOTE(review): returning null here would NPE on unboxing — confirm
     * subclasses always return a non-null Boolean.
     */
    protected abstract Boolean getKafkaAutoCommit();

    /** Consumer group id. */
    protected abstract String getKafkaConsumerGrp();

    /** Maximum records fetched per poll; override to tune the batch size. */
    protected int getMaxPollRecords() {
        return 6400;
    }

    /**
     * @param writers sinks that persist each polled batch; may be null/empty,
     *                in which case records are polled but never written
     */
    public BaseConsumer(Persistable[] writers) {
        this.writers = writers;
    }

    /**
     * Reads the broker URL from the supplied properties and initializes every
     * configured writer with the same properties.
     */
    public void initialize(Properties properties) {
        this.kafkaBrokerUrl = properties.getProperty(LoadConfig.kafkaBrokerUrl);
        if (this.writers != null && this.writers.length > 0) {
            for (Persistable writer : writers) {
                writer.initialize(properties);
            }
        }
    }

    /**
     * Polls the topic forever, forwarding each non-empty batch to the writers
     * and committing offsets synchronously when auto-commit is disabled.
     * Never returns normally; the consumer is closed if the loop exits via
     * an exception.
     *
     * @throws Exception if initialize() was not called (broker URL missing),
     *                   or on any Kafka/writer failure
     */
    protected void consume() throws Exception {
        if (this.kafkaBrokerUrl == null || this.kafkaBrokerUrl.isEmpty()) {
            throw new Exception("kafka broker url is not initialized!");
        }

        // Consumer configuration.
        Properties properties = new Properties();
        properties.put("bootstrap.servers", this.kafkaBrokerUrl);
        properties.put("group.id", this.getKafkaConsumerGrp());
        properties.put("enable.auto.commit", this.getKafkaAutoCommit() ? "true" : "false");
        // earliest: start from the beginning when no committed offset exists
        // (largely moot here, since we seek(0) explicitly below).
        properties.put("auto.offset.reset", "earliest");
        // Max time to wait for a broker response before the request fails.
        properties.put("request.timeout.ms", 1800000);
        //properties.put("session.timeout.ms",1200000);
        properties.put("max.poll.records", Integer.toString(this.getMaxPollRecords()));
        // Keys and values are consumed as plain strings.
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);

        // Manually assign partitions 0-2 (unlike subscribe(), assign() does not
        // participate in group rebalancing).
        // NOTE(review): the partition count is hard-coded to 3 — confirm it
        // matches the actual topic layout.
        List<TopicPartition> topics = Arrays.asList(
                new TopicPartition(getKafkaTopic(), 0),
                new TopicPartition(getKafkaTopic(), 1),
                new TopicPartition(getKafkaTopic(), 2));
        consumer.assign(topics);
        // Always restart from offset 0, so every run reprocesses the whole topic.
        for (TopicPartition partition : topics) {
            consumer.seek(partition, 0L);
        }

        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(3000);
                int recordsCount = (records != null) ? records.count() : 0;
                if (recordsCount <= 0) {
                    // Nothing polled; back off briefly before polling again
                    // (poll() itself already blocks for up to 3 seconds).
                    Thread.sleep(5);
                    continue;
                }
                System.out.println("messages polled..." + recordsCount);

                if (this.writers != null && this.writers.length > 0) {
                    for (Persistable writer : writers) {
                        writer.write(records);
                    }
                    // Synchronous commit: acknowledge offsets only after the
                    // writers have persisted the batch, so a crash before this
                    // point re-delivers rather than loses records.
                    if (!this.getKafkaAutoCommit()) {
                        consumer.commitSync();
                    }
                }
                System.out.println("writing end!");
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * Program entry point.
     *
     * @param args args[0] is the path to the settings file loaded via
     *             LoadConfig.loadSettings
     * @throws Exception on configuration or consume failure
     */
    @Override
    public void executor(String[] args) throws Exception {
        if (args.length < 1) {
            System.out.println("参数异常!");
        } else {
            this.initialize(LoadConfig.loadSettings(args[0]));
            this.consume();
        }
    }
}