package com.linkstec.kafka;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
 * Priority-based consumer demo built on the Kafka "new" (pure-Java) Consumer API.
 *
 * <p>One {@link KafkaConsumer} is created per priority level 1..{@code PRIORITY_EVENT_FLOW_NUM};
 * consumer {@code i} joins group {@code "<groupId>_i"} and subscribes to topic {@code "TEST_i"}.
 * Each scheduling pass polls the consumers from highest priority (1) to lowest, so high-priority
 * topics are always drained first. When a pass consumes nothing, the thread sleeps briefly.
 *
 * <p>Shutdown protocol: {@link KafkaConsumer} is NOT thread-safe — {@code wakeup()} is the only
 * method safe to call from another thread. {@link #shutdown()} calls {@code wakeup()} on every
 * consumer, which makes the next {@code poll()} throw {@link WakeupException}; {@link #run()}
 * catches it, exits its loop and closes all consumers on the polling thread.
 *
 * @author linkage
 */
public class KafkaConsumerTestPriority implements Runnable {
	private static final Logger logger = LogManager.getLogger();
	// One consumer per priority level, keyed by priority index (1 = highest).
	private final Map<Integer, KafkaConsumer<String, String>> consumers = new HashMap<Integer, KafkaConsumer<String, String>>();
	// Early-exit threshold for a high-priority poll.
	// NOTE(review): with MAX_POLL_RECORDS set to 500 below, a single poll() can never return
	// more than 500 records, so the "count() > PRIORITY_LIMIT_COUNT" break is unreachable —
	// confirm the intended relationship between these two constants.
	private static final int PRIORITY_LIMIT_COUNT = 1000;
	// Worker-thread count; per original note: topic partition count = partitions * n.
	private static final int PRIORITY_NUM_CONSUMERS = 3;
	// Number of priority levels (and therefore topics/consumers per worker).
	private static final int PRIORITY_EVENT_FLOW_NUM = 5;
	private static final String PRIORITY_TOPIC = "TEST";
	private static final String PRIORITY_TOPIC_DELIMITER = "_";

	public KafkaConsumerTestPriority() {
	}

	/**
	 * Creates one consumer per priority level.
	 *
	 * <p>Consumer {@code i} (1-based) is placed in group {@code groupId + "_" + i}; the matching
	 * topic name {@code "TEST_" + i} is only logged here — subscription happens in {@link #run()}.
	 *
	 * @param groupId base consumer-group id; the priority index is appended per consumer
	 */
	public void init(String groupId) {
		Properties props = new Properties();
		props.put("bootstrap.servers", "192.168.2.232:9092,192.168.2.233:9092,192.168.2.234:9092");
		props.put("key.deserializer", StringDeserializer.class.getName());
		props.put("value.deserializer", StringDeserializer.class.getName());
		// Original note (translated): for a group with no stored offset, data produced before
		// startup is not consumed; only messages arriving after startup are consumed.
		// NOTE(review): auto-commit is disabled but no code ever commits offsets — on restart
		// each group re-resolves its position from auto.offset.reset. Confirm this is intended.
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "500");
		for (int i = 1; i <= PRIORITY_EVENT_FLOW_NUM; i++) {
			String tempGroupId = groupId + PRIORITY_TOPIC_DELIMITER + i;
			// Overwriting the group id in the shared Properties is safe: KafkaConsumer
			// copies the configuration at construction time.
			props.put(ConsumerConfig.GROUP_ID_CONFIG, tempGroupId);
			String topic = PRIORITY_TOPIC + PRIORITY_TOPIC_DELIMITER + i;
			KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
			consumers.put(i, consumer);
			logger.info("消费组:" + tempGroupId + ";消费topic:" + topic + ";i=" + i);
		}
	}

	/**
	 * Subscribes each consumer to its priority topic, then polls forever.
	 *
	 * <p>Terminates when {@link #shutdown()} wakes the consumers up: the resulting
	 * {@link WakeupException} propagates out of {@link #poll()} and is caught here.
	 * All consumers are closed on this thread (the only thread allowed to do so).
	 */
	@Override
	public void run() {
		try {
			for (int j = 1; j <= PRIORITY_EVENT_FLOW_NUM; j++) {
				String topic = PRIORITY_TOPIC + PRIORITY_TOPIC_DELIMITER + (j);
				logger.info("订阅topic:" + topic);
				consumers.get(j).subscribe(Arrays.asList(topic));
			}
			while (true) {
				logger.info("调度一次");
				poll();
			}
		} catch (WakeupException e) {
			// Expected: shutdown() called wakeup() to break us out of the poll loop.
		} finally {
			// KafkaConsumer must be closed, and only from the thread that polls it.
			for (KafkaConsumer<String, String> consumer : consumers.values()) {
				consumer.close();
			}
		}
	}

	/**
	 * Runs one scheduling pass: polls every consumer once, highest priority first.
	 *
	 * <p>If a high-priority consumer returns more than {@code PRIORITY_LIMIT_COUNT} records the
	 * pass stops early (lower priorities are skipped); if the whole pass consumed nothing, the
	 * thread sleeps before the caller schedules the next pass.
	 *
	 * @return the values of all records consumed during this pass, in consumption order
	 *         (never {@code null}; empty when the pass was idle)
	 * @throws WakeupException if {@link #shutdown()} was invoked; the caller uses this to exit
	 */
	public List<String> poll() {
		List<String> pollData = new ArrayList<>();
		boolean sleepFlag = true;
		for (int j = 1; j <= PRIORITY_EVENT_FLOW_NUM; j++) {
			// Non-blocking poll. A WakeupException thrown here deliberately propagates:
			// the previous version swallowed it, which both defeated shutdown() and left
			// `records` null, causing an NPE on records.count() below.
			// NOTE(review): poll(long) is deprecated since kafka-clients 2.0 in favour of
			// poll(Duration) — kept here for compatibility with the client version in use.
			ConsumerRecords<String, String> records = consumers.get(j).poll(0);

			for (ConsumerRecord<String, String> record : records) {
				Map<String, Object> data = new HashMap<>();
				data.put("partition", record.partition());
				data.put("offset", record.offset());
				data.put("value", record.value());
				data.put("topic", record.topic());
				logger.info("" + data);
				pollData.add(record.value());
			}

			if (records.count() > PRIORITY_LIMIT_COUNT) {
				sleepFlag = false;
				// Original note (translated): once a high-priority consumer yields 1000+
				// records, return immediately; with fewer, also take some low-priority data.
				logger.info("消费个数：" + records.count());
				break;
			} else if (records.count() > 0) {
				// Light traffic: record that this pass did consume something.
				sleepFlag = false;
				logger.info("消费个数：" + records.count());
			}
		}
		// Idle back-off. NOTE(review): the original comment said "twice per second" but the
		// sleep is 1000 ms (once per second) — comment/code mismatch preserved as code-wins.
		if (sleepFlag) {
			try {
				TimeUnit.MILLISECONDS.sleep(1000);
			} catch (InterruptedException e) {
				// Restore the interrupt flag so the owning executor can observe it.
				Thread.currentThread().interrupt();
			}
		}

		return pollData;
	}

	/**
	 * Requests shutdown from another thread.
	 *
	 * <p>{@code wakeup()} is the only {@link KafkaConsumer} method that is safe to call
	 * concurrently; it makes the polling thread's next {@code poll()} throw
	 * {@link WakeupException}, which {@link #run()} turns into a clean exit + close.
	 */
	public void shutdown() {
		for (KafkaConsumer<String, String> consumer : consumers.values()) {
			consumer.wakeup();
		}
	}

	/**
	 * Starts {@code PRIORITY_NUM_CONSUMERS} worker threads, each owning its own full set of
	 * priority consumers, and installs a shutdown hook that stops them gracefully.
	 */
	public static void main(String[] args) {
		String groupId = "pushTest";
		final List<KafkaConsumerTestPriority> consumers = new ArrayList<>();
		ExecutorService executor = Executors.newFixedThreadPool(PRIORITY_NUM_CONSUMERS);
		logger.info("线程池大小:" + PRIORITY_NUM_CONSUMERS);
		for (int i = 0; i < PRIORITY_NUM_CONSUMERS; i++) {
			KafkaConsumerTestPriority consumer = new KafkaConsumerTestPriority();
			consumer.init(groupId);
			consumers.add(consumer);
			executor.submit(consumer);
		}
		Runtime.getRuntime().addShutdownHook(new Thread() {
			@Override
			public void run() {
				// wakeup() each worker, then give the pool a bounded window to close cleanly.
				for (KafkaConsumerTestPriority consumer : consumers) {
					consumer.shutdown();
				}
				executor.shutdown();
				try {
					executor.awaitTermination(5000, TimeUnit.MILLISECONDS);
				} catch (InterruptedException e) {
					Thread.currentThread().interrupt();
				}
			}
		});
	}
}