package org.ykhl.log.commons.receiver;

import org.apache.kafka.clients.consumer.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.ykhl.log.commons.message.LogMessage;
import org.ykhl.mq.commons.Constants;
import org.ykhl.mq.commons.StoppableInfiniteRunnable;
import org.ykhl.mq.commons.Utils;

import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Created by zkf on 2015/12/10.
 */
/**
 * Kafka-backed {@link LogReceiver}: runs a single background thread that polls
 * {@code Constants.MQ_LOG_TOPIC}, decodes each record from JSON into a
 * {@link LogMessage}, and hands the batch to the configured listener.
 */
public class LogReceiverKafkaImpl extends LogReceiver {
    private static final Logger LOG = LoggerFactory.getLogger(LogReceiverKafkaImpl.class);

    private String consumerGroupName = Constants.MQ_LOG_CONSUMER;
    private Consumer<String, String> consumer = null;
    private ExecutorService executorService;
    private StoppableInfiniteRunnable runner;

    /**
     * Overrides the default consumer group name.
     * Only for development usage; there is no need to set this in a production environment.
     *
     * @param consumerGroupName the Kafka consumer group id to join
     */
    public void setConsumerGroupName(String consumerGroupName){
        this.consumerGroupName = consumerGroupName;
    }

    @Override
    protected void doStart() throws Exception {
        runner = new Runner();
        // Single-threaded executor: all submitted tasks run strictly in order.
        this.executorService = Executors.newSingleThreadExecutor();
        this.executorService.submit(runner);
    }

    @Override
    protected void doStop() throws Exception {
        // Signals the polling loop to exit; Runner.afterRun() then shuts down
        // the executor and closes the consumer.
        runner.stop();
    }

    /** Infinite polling loop: one poll per iteration until stopped. */
    private class Runner extends StoppableInfiniteRunnable {

        @Override
        public void beforeRun() {
            Properties props = Utils.genKafkaConsumerProps(mqNameServerAddress, consumerGroupName, useSsl);
            // Encourage the broker to batch several messages per fetch by
            // setting a minimum fetch size (in bytes).
            props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, Constants.KAFKA_BATCH_SIZE_IN_BYTES);
            consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Collections.singletonList(Constants.MQ_LOG_TOPIC));
        }

        @Override
        public void doRun() {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            List<LogMessage> list = new ArrayList<>();
            for (ConsumerRecord<String, String> record : records) {
                // Decode each record individually so a single malformed
                // message does not discard the rest of the batch. (The old
                // code wrapped the whole loop, so the first bad record
                // aborted decoding yet its offset was still committed below.)
                try {
                    list.add(LogMessage.fromJson(record.value()));
                } catch (Exception e) {
                    // Pass the throwable to SLF4J; it prints the stack trace
                    // itself — no need for a hand-rolled StringWriter dump.
                    LOG.error("LogReceiverKafkaImpl Runner failed to decode record {}-{}@{}",
                            record.topic(), record.partition(), record.offset(), e);
                }
            }

            listener.consume(list);
            // poll() never returns null, so only emptiness needs checking
            // before committing offsets.
            if (!records.isEmpty()) {
                consumer.commitAsync();
            }
        }

        @Override
        public void afterRun() {
            executorService.shutdown();
            consumer.close();
        }
    }
}
