package com.jumei.flume.source.kafka;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang.StringUtils;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.conf.Configurable;
import org.apache.flume.conf.ConfigurationException;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.instrumentation.kafka.KafkaSourceCounter;
import org.apache.flume.source.AbstractPollableSource;
import org.apache.flume.source.kafka.KafkaSource;
import org.apache.flume.source.kafka.KafkaSourceConstants;
import org.apache.flume.source.kafka.KafkaSourceUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Splitter;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Lists;

import kafka.consumer.ConsumerIterator;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

/**
 * A Flume pollable source that reads messages from a Kafka topic via the
 * high-level consumer API and records each message's Kafka partition and
 * offset in the Flume event headers, so downstream components can track
 * consumption progress per partition.
 *
 * @author yihongx
 */
public class KafkaOffsetSource extends AbstractPollableSource implements Configurable {

	// Logger must belong to THIS class, not the stock KafkaSource, so log
	// lines are attributed correctly.
	private static final Logger log = LoggerFactory.getLogger(KafkaOffsetSource.class);
	private ConsumerConnector consumer;
	private ConsumerIterator<byte[], byte[]> it;
	private String topic;
	private int batchUpperLimit;
	private int timeUpperLimit;
	private int consumerTimeout;
	private boolean kafkaAutoCommitEnabled;
	private Context context;
	private Properties kafkaProps;
	private final List<Event> eventList = new ArrayList<Event>();
	private KafkaSourceCounter counter;

	// Default partition list used when the "partitions" config key is absent.
	private static final String DEFAULT_PARTITIONS = "0,1,2,3,4,5,6,7,8,9";
	// NOTE(review): parsed from configuration in doConfigure but never
	// consulted in doProcess — confirm whether per-partition filtering was
	// intended; as written every partition assigned to this consumer is read.
	private List<String> partitions = Lists.newArrayList();
	// Separator for the synthetic "<partition>__<offset>" event key used when
	// the Kafka message carries no usable key.
	private static final String P_OFFSET_SEPARATOR = "__";

	/**
	 * Drains up to {@code batchUpperLimit} messages (or until
	 * {@code timeUpperLimit} ms elapse) from the consumer iterator, wraps each
	 * in a Flume event whose headers carry topic, timestamp, key, partition
	 * and offset, delivers the batch to the channel, and commits offsets when
	 * auto-commit is disabled.
	 *
	 * @return {@link Status#READY} while the iterator still has data,
	 *         {@link Status#BACKOFF} when the topic is drained or on error
	 */
	@Override
	protected Status doProcess() throws EventDeliveryException {
		byte[] kafkaMessage;
		byte[] kafkaKey;
		Event event;
		Map<String, String> headers;
		long batchStartTime = System.currentTimeMillis();
		long batchEndTime = System.currentTimeMillis() + timeUpperLimit;
		try {
			boolean iterStatus = false;
			long startTime = System.nanoTime();
			// Accumulate until either the batch-size or batch-duration limit
			// is hit, whichever comes first.
			while (eventList.size() < batchUpperLimit && System.currentTimeMillis() < batchEndTime) {
				iterStatus = hasNext();
				if (iterStatus) {
					// get next message
					MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
					kafkaMessage = messageAndMetadata.message();
					kafkaKey = messageAndMetadata.key();

					// Add headers to event (topic, timestamp, and key)
					headers = new HashMap<String, String>();
					headers.put(KafkaSourceConstants.TIMESTAMP, String.valueOf(System.currentTimeMillis()));
					headers.put(KafkaSourceConstants.TOPIC, topic);
					long c_offset = messageAndMetadata.offset();
					int p = messageAndMetadata.partition();
					// Kafka message keys may be null; guard before decoding so
					// a key-less message does not NPE the whole batch. Decode
					// with an explicit charset for platform independence.
					String kafkaKeyStr = (kafkaKey == null) ? null : new String(kafkaKey, StandardCharsets.UTF_8);
					if (StringUtils.isNotBlank(kafkaKeyStr)) {
						headers.put(KafkaSourceConstants.KEY, kafkaKeyStr);
					} else {
						// Fall back to a synthetic "<partition>__<offset>" key.
						headers.put(KafkaSourceConstants.KEY, p + P_OFFSET_SEPARATOR + c_offset);
					}

					// Record the message's Kafka offset in the header.
					headers.put(KafkaConstants.TOPIC_OFFSET, String.valueOf(c_offset));

					// Record the message's Kafka partition in the header.
					headers.put(KafkaConstants.TOPIC_PARTITION, String.valueOf(p));

					if (log.isDebugEnabled()) {
						log.debug("Message: {}", new String(kafkaMessage, StandardCharsets.UTF_8));
					}
					event = EventBuilder.withBody(kafkaMessage, headers);
					eventList.add(event);
				}
				if (log.isDebugEnabled()) {
					log.debug("Waited: {} ", System.currentTimeMillis() - batchStartTime);
					log.debug("Event #: {}", eventList.size());
				}
			}
			long endTime = System.nanoTime();
			counter.addToKafkaEventGetTimer((endTime - startTime) / (1000 * 1000));
			counter.addToEventReceivedCount(eventList.size());
			// If we have events, send events to channel,
			// clear the event list,
			// and commit if Kafka doesn't auto-commit.
			if (eventList.size() > 0) {
				// Capture the size BEFORE clearing so the post-write debug log
				// reports the real count instead of always 0.
				int batchSize = eventList.size();
				log.info("Consume {} records from Kafka and start to write to channel.", batchSize);

				Stopwatch watch = Stopwatch.createStarted();
				getChannelProcessor().processEventBatch(eventList);

				log.info("Cost {} ms to write to channel.", watch.elapsed(TimeUnit.MILLISECONDS));

				counter.addToEventAcceptedCount(batchSize);
				eventList.clear();
				if (log.isDebugEnabled()) {
					log.debug("Wrote {} events to channel", batchSize);
				}
				if (!kafkaAutoCommitEnabled) {
					// commit the read transactions to Kafka to avoid duplicates
					long commitStartTime = System.nanoTime();
					consumer.commitOffsets();
					long commitEndTime = System.nanoTime();
					counter.addToKafkaCommitTimer((commitEndTime - commitStartTime) / (1000 * 1000));
				}

			}
			if (!iterStatus) {
				// Metric must not depend on the log level — increment it
				// unconditionally.
				counter.incrementKafkaEmptyCount();
				if (log.isDebugEnabled()) {
					log.debug("Returning with backoff. No more data to read");
				}
				return Status.BACKOFF;
			}
			return Status.READY;
		} catch (Exception e) {
			// Pass the throwable as the dedicated last argument (no "{}") so
			// SLF4J logs the full stack trace, not just e.toString().
			log.error("KafkaSource EXCEPTION", e);
			return Status.BACKOFF;
		}
	}

	/**
	 * We configure the source and generate properties for the Kafka Consumer
	 *
	 * Kafka Consumer properties are generated as follows:
	 *
	 * 1. Generate a properties object with some static defaults that can be
	 * overridden by Source configuration 2. We add the configuration users added
	 * for Kafka (parameters starting with kafka. and must be valid Kafka Consumer
	 * properties 3. We add the source documented parameters which can override
	 * other properties
	 *
	 * @param context Flume context holding the source configuration
	 * @throws ConfigurationException if the mandatory Kafka topic is missing
	 */
	@Override
	protected void doConfigure(Context context) throws FlumeException {
		this.context = context;
		batchUpperLimit = context.getInteger(KafkaSourceConstants.BATCH_SIZE, KafkaSourceConstants.DEFAULT_BATCH_SIZE);
		timeUpperLimit = context.getInteger(KafkaSourceConstants.BATCH_DURATION_MS,
				KafkaSourceConstants.DEFAULT_BATCH_DURATION);
		topic = context.getString(KafkaSourceConstants.TOPIC);

		if (topic == null) {
			throw new ConfigurationException("Kafka topic must be specified.");
		}

		kafkaProps = KafkaSourceUtil.getKafkaProperties(context);
		consumerTimeout = Integer.parseInt(kafkaProps.getProperty(KafkaSourceConstants.CONSUMER_TIMEOUT));
		kafkaAutoCommitEnabled = Boolean.parseBoolean(kafkaProps.getProperty(KafkaSourceConstants.AUTO_COMMIT_ENABLED));

		if (counter == null) {
			counter = new KafkaSourceCounter(getName());
		}

		// Parse the comma-separated list of Kafka partitions to consume.
		// NOTE(review): this list is currently never applied as a filter in
		// doProcess — confirm intended behavior.
		String _partitions = context.getString("partitions", DEFAULT_PARTITIONS);
		partitions = Splitter.on(",").splitToList(_partitions);

	}

	/**
	 * Connects to Kafka (via ZooKeeper), creates a single-threaded message
	 * stream for the configured topic and starts the metrics counter.
	 *
	 * @throws FlumeException if the consumer or its message iterator cannot
	 *                        be created
	 */
	@Override
	protected void doStart() throws FlumeException {
		log.info("Starting {}...", this);

		try {
			// initialize a consumer. This creates the connection to ZooKeeper
			consumer = KafkaSourceUtil.getConsumer(kafkaProps);
		} catch (Exception e) {
			throw new FlumeException("Unable to create consumer. "
					+ "Check whether the ZooKeeper server is up and that the " + "Flume agent can connect to it.", e);
		}

		Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
		// We always have just one topic being read by one thread
		topicCountMap.put(topic, 1);

		// Get the message iterator for our topic
		// Note that this succeeds even if the topic doesn't exist
		// in that case we simply get no messages for the topic
		// Also note that currently we only support a single topic
		try {
			Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
			List<KafkaStream<byte[], byte[]>> topicList = consumerMap.get(topic);
			KafkaStream<byte[], byte[]> stream = topicList.get(0);
			it = stream.iterator();
		} catch (Exception e) {
			throw new FlumeException("Unable to get message iterator from Kafka", e);
		}
		log.info("Kafka source {} do started.", getName());
		counter.start();
	}

	/**
	 * Shuts down the consumer (syncing read offsets to ZooKeeper so the same
	 * messages are not re-read) and stops the metrics counter.
	 */
	@Override
	protected void doStop() throws FlumeException {
		if (consumer != null) {
			// exit cleanly. This syncs offsets of messages read to ZooKeeper
			// to avoid reading the same messages again
			consumer.shutdown();
		}
		// Guard: doStop may be invoked even when configuration failed before
		// the counter was created.
		if (counter != null) {
			counter.stop();
		}
		log.info("Kafka Source {} do stopped. Metrics: {}", getName(), counter);
	}

	/**
	 * Check if there are messages waiting in Kafka,
	 * waiting until timeout (10ms by default) for messages to arrive.
	 * and catching the timeout exception to return a boolean
	 *
	 * @return true if a message is available, false on consumer timeout
	 */
	boolean hasNext() {
		try {
			it.hasNext();
			return true;
		} catch (ConsumerTimeoutException e) {
			return false;
		}
	}

}
