package com.walleipt.kafka;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;

import org.apache.log4j.Logger;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

/**
 * Servlet context listener that starts one legacy (0.8-style high-level API)
 * Kafka consumer per ZooKeeper address listed in {@code kafka.properties}
 * (key {@code zookeeper.connects}, comma-separated) and consumes every topic
 * listed in {@code topic.properties} (key {@code topics}, comma-separated).
 * Each received message is appended to a per-(host, topic) log4j logger
 * obtained from {@code LoggerUtil}.
 *
 * <p>All connectors and thread pools created in {@link #contextInitialized}
 * are released in {@link #contextDestroyed} so webapp redeploys do not leak
 * consumer threads. The container invokes both callbacks from a single
 * thread, so the bookkeeping lists need no synchronization.
 */
public class ContextListener implements ServletContextListener {

    private static final Logger LOG = Logger.getLogger(ContextListener.class);

    /** Consumer threads requested per topic; values above the partition count are wasted. */
    private static final int THREADS_PER_TOPIC = 2;

    /** Size of the pool that runs the per-partition consume loops (one connector's worth). */
    private static final int CONSUMER_POOL_SIZE = 8;

    // Everything created at startup, so it can be shut down at context destruction.
    private final List<ConsumerConnector> consumers = new ArrayList<ConsumerConnector>();
    private final List<ExecutorService> executors = new ArrayList<ExecutorService>();

    @Override
    public void contextInitialized(ServletContextEvent sce) {
        // ZooKeeper connect strings, one consumer connector per entry.
        Properties kafkaProps = loadProperties("/kafka.properties");
        String connects = kafkaProps.getProperty("zookeeper.connects");
        if (connects == null || connects.trim().isEmpty()) {
            LOG.error("Missing 'zookeeper.connects' in kafka.properties; no consumers started");
            return;
        }
        String[] kafkaAddresses = connects.split(",");

        // Topic list is the same for every address: load it once, before any
        // connector is created (the original re-read it per address and could
        // leak a freshly created connector when the list was empty).
        Properties topicProps = loadProperties("/topic.properties");
        String topicList = topicProps.getProperty("topics");
        if (topicList == null || topicList.trim().isEmpty()) {
            LOG.error("Missing 'topics' in topic.properties; no consumers started");
            return;
        }
        String[] topics = topicList.split(",");

        for (String address : kafkaAddresses) {
            // Per-connector consumer configuration.
            Properties consumerProps = new Properties();
            // ZooKeeper address this connector registers with.
            consumerProps.put("zookeeper.connect", address);
            // Consumer group id.
            consumerProps.put("group.id", "123");
            // Interval (ms) between automatic offset commits.
            consumerProps.put("auto.commit.interval.ms", "1000");

            ConsumerConnector consumer =
                    Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));
            consumers.add(consumer); // remember it for contextDestroyed

            try {
                startConsumer(consumer, address, topics, new MqMessageHandler<String>() {
                    @Override
                    public void handle(String message, String address, String topic) {
                        // Logger name is keyed by host only; tolerate addresses
                        // without an explicit ":port" suffix.
                        int colon = address.indexOf(':');
                        String host = colon >= 0 ? address.substring(0, colon) : address;
                        Logger loggerTopic = LoggerUtil.getLoggerByName(host, topic);
                        loggerTopic.info(message);
                    }
                });
            } catch (Exception e) {
                // One bad address must not prevent the others from starting.
                LOG.error("Failed to start Kafka consumer for address " + address, e);
            }
        }
    }

    /**
     * Loads a classpath properties resource as UTF-8.
     *
     * @param resource absolute classpath path, e.g. {@code "/kafka.properties"}
     * @return the loaded properties; empty (never null) if the resource is
     *         missing or unreadable, with the failure logged
     */
    private Properties loadProperties(String resource) {
        Properties props = new Properties();
        InputStream in = ContextListener.class.getResourceAsStream(resource);
        if (in == null) {
            LOG.error("Classpath resource not found: " + resource);
            return props;
        }
        // try-with-resources closes the stream (the original leaked it).
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(in, StandardCharsets.UTF_8))) {
            props.load(reader);
        } catch (IOException e) {
            LOG.error("Failed to load properties resource " + resource, e);
        }
        return props;
    }

    /**
     * Handler callback invoked once per consumed message; implement your own
     * processing logic in {@link #handle}.
     *
     * @param <T> message payload type
     */
    public static abstract class MqMessageHandler<T extends Serializable> {
        /**
         * Processes one message.
         *
         * @param message decoded message payload
         * @param address ZooKeeper address of the connector that received it
         * @param topic   topic the message came from
         */
        public abstract void handle(T message, String address, String topic);
    }

    /**
     * Starts consuming the given topics on the given connector. One stream
     * (partition group) is requested per topic per {@link #THREADS_PER_TOPIC},
     * and each stream is drained by its own pool thread.
     *
     * @param consumer connector already registered with ZooKeeper
     * @param address  ZooKeeper address, passed through to the handler
     * @param topics   topic names to consume
     * @param handler  application callback for each decoded message
     * @param <T>      retained for binary/source compatibility; payloads are
     *                 decoded as UTF-8 strings
     * @throws Exception if stream creation fails
     */
    public <T extends Serializable> void startConsumer(ConsumerConnector consumer,
            final String address, String[] topics,
            final MqMessageHandler<String> handler) throws Exception {

        // topic name -> number of consumer threads; threads beyond the
        // partition count would simply idle.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        for (String topic : topics) {
            topicCountMap.put(topic, Integer.valueOf(THREADS_PER_TOPIC));
        }
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
                consumer.createMessageStreams(topicCountMap);

        // Pool that runs one consume loop per stream; tracked for shutdown.
        ExecutorService executor = Executors.newFixedThreadPool(CONSUMER_POOL_SIZE);
        executors.add(executor);

        for (final String topic : topics) {
            List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
            if (streams == null) {
                LOG.warn("No streams returned for topic " + topic);
                continue;
            }
            // One thread per stream (i.e. per requested partition slot).
            for (final KafkaStream<byte[], byte[]> stream : streams) {
                executor.execute(new Runnable() {
                    @Override
                    public void run() {
                        ConsumerIterator<byte[], byte[]> it = stream.iterator();
                        // hasNext() blocks until a message arrives.
                        while (it.hasNext()) {
                            String message;
                            try {
                                // Decode explicitly as UTF-8; the platform
                                // default charset is environment-dependent.
                                message = new String(it.next().message(),
                                        StandardCharsets.UTF_8);
                            } catch (Exception e) {
                                // Skip the bad message instead of returning,
                                // which would silently kill this consumer thread.
                                LOG.error("Failed to read message from topic " + topic, e);
                                continue;
                            }
                            try {
                                handler.handle(message, address, topic);
                            } catch (Exception e) {
                                // A handler failure must not stop consumption.
                                LOG.error("Handler failed for topic " + topic, e);
                            }
                        }
                    }
                });
            }
        }
    }

    @Override
    public void contextDestroyed(ServletContextEvent sce) {
        // Release Kafka connectors first so the blocked iterators unblock,
        // then tear down the worker pools. Prevents thread/classloader leaks
        // on webapp redeploy.
        for (ConsumerConnector consumer : consumers) {
            try {
                consumer.shutdown();
            } catch (Exception e) {
                LOG.warn("Error shutting down Kafka consumer", e);
            }
        }
        consumers.clear();
        for (ExecutorService executor : executors) {
            executor.shutdownNow();
        }
        executors.clear();
    }
}
