/*
 * @(#) KafkaCompressTest.java Created on 2015-01-27 16:58:22
 *
 * Copyright 2015 jstx.
 */

package kafka.examples;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

/**
 * Demonstrates Kafka producer-side gzip compression: a background producer
 * thread sends batches of messages to a compressed topic while a consumer
 * thread reads them back and prints each message with its topic, partition
 * and offset.
 *
 * <p>Created 2015-01-27 by zhanghongliang@hiveview.com.</p>
 *
 * @author zhanghongliang@hiveview.com
 * @version $Revision$
 * @update: $Date$
 */
public class KafkaCompressTest {
	private static final String TOPIC = "gzip-topic-test";

	public static void main(String[] args) {
		System.out.println("product start...................");
		new Producer(TOPIC).start();

		System.out.println("Consumer start...................");
		new Consumer(TOPIC).start();

		System.out.println("启动完成------------------------------------------");
		try {
			Thread.sleep(500 * 1000);
			System.exit(0);
		} catch (InterruptedException e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		}
	}

	// 生产者
	public static class Producer extends Thread {
		private final kafka.javaapi.producer.Producer<String, String> producer;
		private final String topic;
		private final Properties props = new Properties();

		public Producer(String topic) {
			props.put("serializer.class", "kafka.serializer.StringEncoder");
			props.put("metadata.broker.list", KafkaProperties.BROKER_LIST);
			// O means no compression. 1 means GZIP compression. 2 means Snappy
			// compression
			props.put("compression.codec", "gzip");
			// 指定需要压缩传输的topic 这些只对 send set 好使，单个消息不好使
			props.put("compressed.topics", topic);

			props.put("producer.type", "async");
			props.put("queue.enqueue.timeout.ms", "-1");
			props.put("batch.num.messages", "200");

			// props.put("queue.buffering.max.messages", "10");
			// props.put("batch.num.messages", "10");
			// props.put("send.buffer.bytes", "1024");

			// Use random partitioner. Don't need the key type. Just set it to
			// Integer.
			// The message is of type String.
			producer = new kafka.javaapi.producer.Producer<String, String>(
					new ProducerConfig(props));
			this.topic = topic;
		}

		public void run() {
			try {
				int messageNo = 1;
				String threadName = Thread.currentThread().getName();
				while (true) {
					List<KeyedMessage<String, String>> list = new ArrayList<KeyedMessage<String, String>>();
					for (int i = 0; i < 20; i++) {
						String messageStr = new String(
								"Message111111111111111111111111111111_"
										+ messageNo + "_" + i);
						KeyedMessage<String, String> keyedMessage = new KeyedMessage<String, String>(
								topic, messageStr);
						list.add(keyedMessage);
					}
					Thread.sleep(1000);
					// 发送给brokder 消息
					producer.send(list);
					System.out.println(threadName + " 批量发送消息。。。。。。");
					messageNo++;
				}
			} catch (InterruptedException e) {
				// TODO Auto-generated catch block
				e.printStackTrace();
			}
		}
	}

	// 消费者
	public static class Consumer extends Thread {
		private final ConsumerConnector consumer;
		private final String topic;

		public Consumer(String topic) {
			consumer = kafka.consumer.Consumer
					.createJavaConsumerConnector(createConsumerConfig());

			this.topic = topic;
		}

		private static ConsumerConfig createConsumerConfig() {
			Properties props = new Properties();
			props.put("zookeeper.connect", KafkaProperties.zkConnect);
			props.put("zookeeper.session.timeout.ms", "1000");
			props.put("zookeeper.sync.time.ms", "200");
			props.put("auto.commit.interval.ms", "1000");
			props.put("group.id", "1999");
			// 自动设置从最早读取，根据group id
			// http://kafka.apache.org/documentation.html
			props.put("auto.offset.reset", "smallest");
			return new ConsumerConfig(props);

		}

		public void run() {
			String threadName = Thread.currentThread().getName();
			Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
			topicCountMap.put(topic, new Integer(1));

			Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer
					.createMessageStreams(topicCountMap);

			KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
			ConsumerIterator<byte[], byte[]> it = stream.iterator();
			// it hasNext 如果没有消息 会阻塞
			while (it.hasNext()) {
				MessageAndMetadata<byte[], byte[]> mdm = it.next();
				System.out.println(threadName + " 收到消息:" + new String(mdm.message()));
				System.out.println("topic:" + mdm.topic());
				System.out.println("partition:" + mdm.partition());
				System.out.println("offset:" + mdm.offset());
				System.out
						.println("-------------------------------------------");
			}
		}
	}
}
