package com.jumei.flume.sink.kafka;

import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.jumei.flume.handler.Handler;
import com.jumei.flume.sink.kudu.Config;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.flume.*;
import org.apache.flume.conf.Configurable;
import org.apache.flume.sink.AbstractSink;
import org.apache.flume.sink.kafka.KafkaSinkConstants;
import org.apache.flume.sink.kafka.KafkaSinkUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;


/**
 * Flume sink that publishes channel events to Kafka, batch-processing each
 * transaction through a configurable business {@link Handler} first.
 *
 * @author yihongx
 *
 */
public class KafkaSink extends AbstractSink implements Configurable {
    protected Logger logger = LoggerFactory.getLogger(KafkaSink.class);

    private final static int DEFAULT_BATCH_SIZE = 1000;
    private int batchSize = 0;
	public static final String KEY_HDR = "key";
	public static final String TOPIC_HDR = "topic";
	private Properties kafkaProps;
	private Producer<String, byte[]> producer;
	private String topic;
	private List<KeyedMessage<String, byte[]>> messageList;
	
    private List<Event> eventList;

    /**
     * 业务Handler
     */
    Handler handlers;
    
    @Override
    public Status process() throws EventDeliveryException {
        Status result = Status.READY;
        String eventTopic = null;
		String eventKey = null;

        Channel channel = getChannel();
        Transaction transaction = null;
        
        try {
            long processedEvents = 0;
            
            transaction = channel.getTransaction();
            transaction.begin();

            eventList.clear();
            messageList.clear();
            for (; processedEvents < batchSize; processedEvents += 1) {
                Event event = channel.take();
                if (event == null) {
                	/**
					 * BUG FIX:
					 *  HIGH CPU LOAD
					 * 
					 */
					if (processedEvents == 0) {
						result = Status.BACKOFF;
						logger.debug("FOUND processedEvents == 0.");
					} else { }
                    break;
                }

                eventList.add(event);
            }
            
            try {
				if (CollectionUtils.isNotEmpty(eventList)) {
				    handlers.handler(eventList);
				    
				    for(Event one : eventList) {
				    	if(null == one) continue;
				    	
				    	Map<String, String> headers = one.getHeaders();
//			            if ((eventTopic = headers.get(TOPIC_HDR)) == null) {
//							eventTopic = topic;
//						}
			            eventKey = headers.get(KEY_HDR);
				    	// create a message and add to buffer
						KeyedMessage<String, byte[]> data = new KeyedMessage<String, byte[]>(topic, eventKey, one.getBody());
						messageList.add(data);
				    }
				    
				    //Send to Kafka
				    producer.send(messageList);
				    
				}
			} catch (Exception ex) {
				logger.error("sensorKafkaHandler's error, ", ex);
				throw Throwables.propagate(ex);
			}

            transaction.commit();
        } catch (Exception e) {
        	logger.error("KafkaSink's Failed to publish events", e);
            result = Status.BACKOFF;
            try {
            	transaction.rollback();
            } catch (Exception ex) {
                logger.error("Transaction rollback failed", ex);
                throw Throwables.propagate(e);
            }
            throw new EventDeliveryException(e);
        } finally {
        	transaction.close();
        }

        return result;
    }

    @Override
    public void configure(Context context) {
    	//kafka环境初始化
    	configureKafka(context);
    	
        // 批处理数据数量
        eventList = new ArrayList<>(batchSize);
        
        //kafka handler
        String parserClassName = context.getString(Config.KAFKA_HANDLER);
        Preconditions.checkArgument(!StringUtils.isEmpty(parserClassName), "Must supply a valid handler class name string");
        
        ClassLoader classLoader = getClass().getClassLoader();
        Class parserClass = null;
        try {
            parserClass = classLoader.loadClass(parserClassName);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        }
        Preconditions.checkArgument(parserClass != null, "parserClass was misconfigured");
        try {
        	handlers = (Handler) parserClass.newInstance();
            handlers.start();
        } catch (Exception e) {
            logger.error("init KuduJdbcSink error!", e);
        }
        
        
    }

    public void configureKafka(Context context) {
		batchSize = context.getInteger(KafkaSinkConstants.BATCH_SIZE, KafkaSinkConstants.DEFAULT_BATCH_SIZE);
		messageList = new ArrayList<KeyedMessage<String, byte[]>>(batchSize);
		logger.debug("Using batch size: {}", batchSize);

		topic = context.getString(KafkaSinkConstants.TOPIC, KafkaSinkConstants.DEFAULT_TOPIC);
		if (topic.equals(KafkaSinkConstants.DEFAULT_TOPIC)) {
			logger.warn("The Property 'topic' is not set. " + "Using the default topic name: "
					+ KafkaSinkConstants.DEFAULT_TOPIC);
		} else {
			logger.info("Using the static topic: " + topic + " this may be over-ridden by event headers");
		}

		kafkaProps = KafkaSinkUtil.getKafkaProperties(context);

		if (logger.isDebugEnabled()) {
			logger.debug("Kafka producer properties: " + kafkaProps);
		}
    }
    
    
    @Override
    public synchronized void start() {
    	if(null == handlers) {
    		handlers.start();
    	}
    	
    	// instantiate the producer
    	ProducerConfig config = new ProducerConfig(kafkaProps);
    	producer = new Producer<String, byte[]>(config);
        super.start();
    }

    @Override
    public synchronized void stop() {
    	if(null != handlers) {
    		handlers.stop();
    	}
    	
    	producer.close();
        super.stop();
    }

}

