package com.wang.helloworld.client;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

import com.wang.helloworld.HelloKafka;
import com.wang.helloworld.event.EventManager;

/**
 * High-level Kafka consumer that reads one topic across multiple streams and
 * forwards each received message to registered listeners via {@link EventManager}.
 *
 * <p>Each call to {@link #subscribe(String)} opens {@code HelloKafka.partitions}
 * streams and consumes each on its own pooled worker thread.
 */
public class KafkaConsumer extends EventManager {
	/**
	 * Connector to the Kafka cluster. This is an instance field (previously
	 * {@code static}, which let a second KafkaConsumer instance silently
	 * clobber the first one's connector).
	 */
	private final ConsumerConnector consumer;

	/**
	 * Creates a consumer from the given Kafka configuration.
	 *
	 * @param props Kafka consumer properties (e.g. zookeeper.connect, group.id)
	 */
	public KafkaConsumer(Properties props) {
		ConsumerConfig config = new ConsumerConfig(props);
		consumer = Consumer.createJavaConsumerConnector(config);
	}

	/**
	 * Creates a consumer, optionally replacing {@code group.id} with a random
	 * group name so this instance does not share partitions with an existing group.
	 *
	 * @param props       Kafka consumer properties; mutated when {@code randomGroup} is true
	 * @param randomGroup when true, overwrite group.id with "Group_" + a random int in [0, 100)
	 */
	public KafkaConsumer(Properties props, boolean randomGroup) {
		if (randomGroup) {
			Random random = new Random();
			props.put("group.id", "Group_" + random.nextInt(100));
		}

		ConsumerConfig config = new ConsumerConfig(props);
		consumer = Consumer.createJavaConsumerConnector(config);
	}

	/**
	 * Subscribes to {@code topic}, opening {@code HelloKafka.partitions} streams
	 * and consuming each one on a dedicated pooled thread. A worker keeps reading
	 * until {@link #processEvent(MessageEvent, long)} returns true, the iterator
	 * is exhausted after having delivered messages, or the thread is interrupted.
	 *
	 * @param topic the topic name to subscribe to
	 */
	public void subscribe(String topic) {
		Map<String, Integer> topicCountMap = new HashMap<String, Integer>();

		// Request one stream per partition so each can be consumed by its own thread.
		topicCountMap.put(topic, Integer.valueOf(HelloKafka.partitions));
		Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);

		Set<String> topics = consumerMap.keySet();
		for (String topicName : topics) {
			List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topicName);

			// One worker thread per stream; registered globally so the app can shut them down.
			ExecutorService executor = Executors.newFixedThreadPool(streams.size());
			HelloKafka.executorManager.add(executor);

			try {
				for (final KafkaStream<byte[], byte[]> stream : streams) {
					final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();

					// Submit a Runnable, not a Thread subclass: the executor supplies the
					// actual threads, so a submitted Thread object is never started and its
					// own getId() would report an id belonging to no running thread.
					executor.submit(new Runnable() {
						public void run() {
							// Id of the pool thread actually executing this worker.
							final long threadId = Thread.currentThread().getId();

							// Stays true while we have only ever timed out; cleared as soon
							// as a message is delivered, so a subsequent clean drain of the
							// iterator (hasNext() == false) ends the worker.
							boolean timedOut = true;

							try {
								while (timedOut) {
									try {
										// hasNext() returns false once consumer.shutdown() is called.
										while (consumerIterator.hasNext()) {
											timedOut = false;

											// hasNext() does NOT advance the offset, but next() DOES;
											// offsets are committed in the finally block below.
											MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
											MessageEvent messageEvent = new MessageEvent(messageAndMetadata);
											if (processEvent(messageEvent, threadId)) {
												return;
											}
										}
										System.out.println("No next any more.");
										TimeUnit.SECONDS.sleep(1);
									} catch (ConsumerTimeoutException e) {
										// consumer.timeout.ms elapsed with no message: keep polling.
										timedOut = true;
									} catch (InterruptedException e) {
										System.out.println("[Thread-" + threadId + "] is shutting down...");
										Thread.currentThread().interrupt(); // restore interrupt status for the pool
										break;
									} catch (Exception e) {
										e.printStackTrace();
									}
								}
							} finally {
								// Persist consumed offsets, then close the connector.
								// NOTE(review): shutdown() stops ALL streams of this connector as
								// soon as any single worker exits — confirm that is the intended
								// lifecycle before relying on multi-stream consumption.
								consumer.commitOffsets();
								consumer.shutdown();
							}
						}
					});
				}
			} catch (Exception e) {
				e.printStackTrace();
			}
		}
	}

	/**
	 * Dispatches a received message to all registered listeners.
	 *
	 * @param messageEvent event wrapping the consumed Kafka message
	 * @param threadId     id of the consuming worker thread, used as a log prefix
	 * @return true to make the consuming worker stop; this implementation always
	 *         returns false (a subclass may override to stop consumption)
	 */
	public boolean processEvent(MessageEvent messageEvent, long threadId) {
		System.out.print("[Thread-" + threadId + "]");
		notifyListeners(messageEvent);
		return false;
	}
}
