package com.linkstec.kafka;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
 * Kafka New Consumer API基于Kafka自身的group coordination protocol（老版本基于Zookeeper）
 * ，new Consumer具有以下优势 1、合并过去High Level和Low Level的API，提供一个同时支持group
 * coordination和lower level access 2、使用纯Java重写API，运行时不再依赖Scala和Zookeeper
 * 3、更安全：Kafka0.9提供的security extensions，只支持new consumer 4、支持fault-tolerant group
 * of consumer processes，老版本强依赖于zookeeper来实现，由于其中的逻辑极其复杂，
 * 所以其他编程语言实现这个特性非常困难，目前kafka官方已经将此特性在C client上实现了
 * 
 * 虽然new consumer重构API并且使用新的coordination protocol，但是概念并没有根本改变， 所以熟悉old
 * consumer的用户不会难以理解new consumer。然而，需要额外关心下group management 和threading model。
 * 
 * @author linkage
 *
 */
public class KafkaConsumerPriority implements Runnable {
	private static final Logger logger = LogManager.getLogger();
	// High-priority consumer: subscribes to the topics supplied by the caller.
	private final KafkaConsumer<String, String> consumer;
	// Low-priority consumer: subscribes to the hard-coded "TEST_2" topic.
	private final KafkaConsumer<String, String> consumer2;
	private final List<String> topics;
	private final int id;

	/**
	 * Creates the two consumers. Both share the same connection settings; the
	 * second one joins a separate consumer group ({@code groupId + "2"}) so the
	 * two subscriptions are rebalanced independently.
	 *
	 * @param id      identifier logged with every consumed record
	 * @param groupId consumer-group id of the high-priority consumer
	 * @param topics  topics the high-priority consumer subscribes to
	 */
	public KafkaConsumerPriority(int id, String groupId, List<String> topics) {
		this.id = id;
		this.topics = topics;
		Properties props = new Properties();
		props.put("bootstrap.servers", "192.168.2.232:9092,192.168.2.233:9092,192.168.2.234:9092");
		// props.put("bootstrap.servers",
		// "192.168.10.141:9092,192.168.10.143:9092,192.168.10.61:9092");
		props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
		props.put("key.deserializer", StringDeserializer.class.getName());
		props.put("value.deserializer", StringDeserializer.class.getName());
		// Auto-commit is off, so poll() commits offsets explicitly after
		// processing; without that, every restart would re-read from
		// "earliest" (the original code never committed at all).
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "500");
		this.consumer = new KafkaConsumer<>(props);
		// Re-use the same Properties with only the group id changed.
		props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId + "2");
		this.consumer2 = new KafkaConsumer<>(props);
	}

	/**
	 * Poll loop: subscribes both consumers, then drains them repeatedly until
	 * {@link #shutdown()} wakes them up.
	 */
	@Override
	public void run() {
		try {
			consumer.subscribe(topics);
			logger.info("订阅topic:" + topics);
			List<String> topics2 = Arrays.asList("TEST_2");
			consumer2.subscribe(topics2);
			logger.info("订阅topic:" + topics2);
			while (true) {
				poll();
				logger.info("调度一次");
			}
		} catch (WakeupException e) {
			// Expected on shutdown(): wakeup() aborts a blocked/next poll.
		} finally {
			// FIX: close BOTH consumers; the original leaked consumer2.
			consumer.close();
			consumer2.close();
		}
	}

	/**
	 * Drains both consumers once: the high-priority consumer first, then the
	 * low-priority one. Offsets are committed synchronously after each drain
	 * because auto-commit is disabled. When neither consumer returned any
	 * records, the thread sleeps one second to avoid a busy spin.
	 *
	 * @return the values of every record consumed in this round, possibly
	 *         empty — never {@code null} (the original always returned null)
	 */
	public List<String> poll() {
		List<String> values = new ArrayList<>();
		int consumed = drain(consumer, values);
		consumed += drain(consumer2, values);
		// Idle back-off: poll once per second when nothing arrived.
		if (consumed == 0) {
			try {
				TimeUnit.MILLISECONDS.sleep(1000);
			} catch (InterruptedException e) {
				// FIX: restore the interrupt flag instead of swallowing it.
				Thread.currentThread().interrupt();
			}
		}
		return values;
	}

	/**
	 * Polls one consumer once (non-blocking), logs each record, commits the
	 * consumed offsets and appends the record values to {@code sink}.
	 *
	 * @param c    consumer to poll
	 * @param sink receives the value of every consumed record
	 * @return the number of records consumed in this poll
	 */
	private int drain(KafkaConsumer<String, String> c, List<String> sink) {
		ConsumerRecords<String, String> records = c.poll(0);
		for (ConsumerRecord<String, String> record : records) {
			Map<String, Object> data = new HashMap<>();
			data.put("partition", record.partition());
			data.put("offset", record.offset());
			data.put("value", record.value());
			logger.info(this.id + ": " + data);
			sink.add(record.value());
		}
		// The original had two branches (>1000 / >0) with identical bodies;
		// collapsed into one.
		if (records.count() > 0) {
			logger.info("消费个数：" + records.count());
			// FIX: auto-commit is disabled, so commit explicitly — otherwise
			// the same records are re-delivered after every restart.
			c.commitSync();
		}
		return records.count();
	}

	/**
	 * Aborts the poll loop. Safe to call from another thread (e.g. a shutdown
	 * hook); run()'s finally block then closes the consumers.
	 */
	public void shutdown() {
		consumer.wakeup();
		// FIX: wake the second consumer too, so no poll on it stays blocked.
		consumer2.wakeup();
	}

	/**
	 * Launches {@code numConsumers} poller threads and installs a shutdown
	 * hook that wakes them up and waits briefly for the executor to drain.
	 */
	public static void main(String[] args) {
		int numConsumers = 2;
		String groupId = "pushTest";
		List<String> topics = Arrays.asList("TEST_1");
		ExecutorService executor = Executors.newFixedThreadPool(numConsumers);
		final List<KafkaConsumerPriority> consumers = new ArrayList<>();
		for (int i = 0; i < numConsumers; i++) {
			KafkaConsumerPriority consumer = new KafkaConsumerPriority(i, groupId, topics);
			consumers.add(consumer);
			executor.submit(consumer);
		}
		Runtime.getRuntime().addShutdownHook(new Thread() {
			@Override
			public void run() {
				for (KafkaConsumerPriority consumer : consumers) {
					consumer.shutdown();
				}
				executor.shutdown();
				try {
					executor.awaitTermination(5000, TimeUnit.MILLISECONDS);
				} catch (InterruptedException e) {
					// Restore the interrupt flag; the JVM is exiting anyway.
					Thread.currentThread().interrupt();
				}
			}
		});
	}
}