package com.captjack.bigdata.flume.kafka;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.PollableSource;
import org.apache.flume.conf.Configurable;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.instrumentation.SourceCounter;
import org.apache.flume.source.AbstractSource;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Custom Flume Kafka source.
 *
 * <p>Drains raw messages from {@code CustomizeKafkaConsumer.KAFKA_MESSAGE_QUEUE}, parses
 * the {@code category-payload} prefix into event headers, and delivers events to the
 * channel in batches bounded by {@code rollCount} events or {@code rollInterval} seconds,
 * whichever comes first.
 *
 * @author Jack Sparrow
 * @version 1.0.0
 * @date 2018/7/18 22:34
 */
public class CustomizeKafkaSource extends AbstractSource implements Configurable, PollableSource {

    private static final Log LOGGER = LogFactory.getLog(CustomizeKafkaSource.class);

    /** Maximum number of events delivered per {@link #process()} call (config key "rollCount"). */
    private int sourceBatchSize = 100000;

    /** Maximum seconds spent accumulating one batch (config key "rollInterval"). */
    private int rollInterval = 60;

    /** Flume metrics counter; created and started in {@link #configure(Context)}. */
    private SourceCounter counter;

    /** Reusable batch buffer; cleared at the start of every process() call. */
    private final List<Event> eventList = new ArrayList<Event>();

    private CustomizeKafkaConsumer customizeKafkaConsumer;

    /**
     * Reads source configuration and starts the backing Kafka consumer.
     *
     * <p>Required keys: {@code kafka.topic}, {@code kafka.groupid},
     * {@code kafka.zookeeper.conn}. Optional: {@code kafka.numThreads} (default 1),
     * {@code rollCount}, {@code rollInterval}. When a required key is missing the
     * error is logged and the source is left unconfigured (original behavior kept).
     *
     * @param context Flume source context carrying the agent configuration
     */
    @Override
    public void configure(Context context) {

        sourceBatchSize = context.getInteger("rollCount", sourceBatchSize);

        rollInterval = context.getInteger("rollInterval", rollInterval);

        String topic = context.getString("kafka.topic");
        if ((topic == null) || ("".equals(topic.trim()))) {
            LOGGER.error("topic is null");
            return;
        }

        String groupId = context.getString("kafka.groupid");
        if ((groupId == null) || ("".equals(groupId.trim()))) {
            LOGGER.error("groupId is null");
            return;
        }

        String zookeeperConn = context.getString("kafka.zookeeper.conn");
        // trim() added for consistency with the topic/groupId checks above.
        if ((zookeeperConn == null) || ("".equals(zookeeperConn.trim()))) {
            LOGGER.error("zookeeper conn is null");
            return;
        }

        // Default to one consumer thread: the original unboxed a possibly-null Integer
        // and threw NullPointerException when "kafka.numThreads" was not configured.
        int threadNum = context.getInteger("kafka.numThreads", 1);
        this.customizeKafkaConsumer = new CustomizeKafkaConsumer(zookeeperConn, groupId, topic, threadNum);
        this.customizeKafkaConsumer.start();
        this.counter = new SourceCounter(getName());
        this.counter.start();
        LOGGER.info("zookeeperConn:" + zookeeperConn + ", topic:" + topic + ", groupId:" + groupId + ", threadNum:" + threadNum);
        LOGGER.info("configure ....rollCount: " + sourceBatchSize + " , rollInterval: " + rollInterval);
    }

    /**
     * Accumulates one batch of events from the consumer queue and hands it to the channel.
     *
     * @return {@code READY} on success (even for an empty batch), {@code BACKOFF} when
     *         delivery to the channel failed
     */
    @Override
    public PollableSource.Status process() {
        this.eventList.clear();
        long startTimeMilSecond = System.currentTimeMillis();
        try {
            while ((this.eventList.size() < sourceBatchSize)
                    && ((System.currentTimeMillis() - startTimeMilSecond) / 1000.0D < rollInterval)) {
                try {
                    String rawMessage = CustomizeKafkaConsumer.KAFKA_MESSAGE_QUEUE.poll();
                    if ((rawMessage == null) || ("".equals(rawMessage))) {
                        // Queue is empty: back off briefly instead of busy-spinning the
                        // CPU for the whole rollInterval window.
                        Thread.sleep(10L);
                        continue;
                    }
                    Event event = buildEvent(rawMessage);
                    if (event != null) {
                        this.eventList.add(event);
                    }
                } catch (InterruptedException ie) {
                    // Restore the interrupt flag and stop accumulating; deliver what we have.
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    // Per-message failures must not abort the whole batch.
                    LOGGER.error(e.getMessage(), e);
                }
            }
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info("batch send size:" + this.eventList.size());
            }

            if (!this.eventList.isEmpty()) {
                this.counter.addToEventReceivedCount(this.eventList.size());
                getChannelProcessor().processEventBatch(this.eventList);

                this.counter.addToEventAcceptedCount(this.eventList.size());
            }
            return PollableSource.Status.READY;
        } catch (Exception e) {
            LOGGER.error(e.getMessage(), e);
            return PollableSource.Status.BACKOFF;
        }
    }

    /**
     * Parses one raw message of the form {@code category-payload} into a Flume event.
     *
     * <p>A leading {@code "hdfs"} marker is stripped before parsing; messages containing
     * {@code "/trace"} get a {@code someHeader=trace} header.
     *
     * @param rawMessage non-empty raw message taken from the queue
     * @return the event, or {@code null} when the message has no {@code "-"} separator
     *         (the original code threw StringIndexOutOfBoundsException here and only
     *         logged it; the message is now skipped explicitly)
     */
    private Event buildEvent(String rawMessage) {
        Map<String, String> headers = new HashMap<String, String>(5);
        LOGGER.debug("someHeader+content:" + rawMessage);

        if ((rawMessage.length() >= 4) && ("hdfs".equals(rawMessage.substring(0, 4)))) {
            LOGGER.info("his:" + rawMessage);
            rawMessage = rawMessage.substring(4);
        } else if (rawMessage.contains("/trace")) {
            headers.put("someHeader", "trace");
        }

        int separator = rawMessage.indexOf('-');
        if (separator < 0) {
            LOGGER.warn("message has no category separator, dropped: " + rawMessage);
            return null;
        }
        headers.put("category", rawMessage.substring(0, separator));
        headers.put("timestamp", String.valueOf(System.currentTimeMillis()));

        Event event = new SimpleEvent();
        // Pin the charset: the original getBytes() used the platform default encoding.
        event.setBody(rawMessage.substring(separator + 1).getBytes(StandardCharsets.UTF_8));
        event.setHeaders(headers);
        return event;
    }

    /** Stops the consumer and the metrics counter; both may be null if configure() bailed out early. */
    @Override
    public synchronized void stop() {
        if (this.customizeKafkaConsumer != null) {
            this.customizeKafkaConsumer.shutdown();
        }
        if (this.counter != null) {
            this.counter.stop();
        }
        super.stop();
    }

}
