package com.linkstec.kafka;

import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.*;
import java.util.concurrent.TimeUnit;

/**
 * Consumes Kafka messages in strict priority order across a family of
 * suffixed topics, e.g. {@code TEST_1 > TEST_2 > TEST_3 ...} where suffix 1
 * is the highest priority and is always polled first in each cycle.
 *
 * @author linkage
 */
public abstract class KafkaConsumerServicePriority implements Runnable {
	private static final Logger logger = LogManager.getLogger();

	// One consumer per priority level, keyed by priority (1 = highest).
	protected final Map<Integer, KafkaConsumer<String, String>> consumers = new HashMap<Integer, KafkaConsumer<String, String>>();
	// If a higher-priority poll yields more than this many records, the
	// lower-priority topics are skipped for the current cycle.
	private static int PRIORITY_LIMIT_COUNT;
	// Number of priority levels (= number of topics and consumers).
	protected static int PRIORITY_EVENT_FLOW_NUM;
	// Delimiter between the base topic/group name and the priority suffix.
	protected static String PRIORITY_TOPIC_DELIMITER;

	static {
		String count = "1000";//ConfigUtil.getProperty("priority.limit.count");
		if (StringUtils.isEmpty(count)) {
			throw new RuntimeException("priority.limit.count未配置，推送服务退出运行");
		}
		PRIORITY_LIMIT_COUNT = Integer.parseInt(count);

		String flowNum = "5";//ConfigUtil.getProperty("priority.event.flow.num");
		if (StringUtils.isEmpty(flowNum)) {
			throw new RuntimeException("priority.event.flow.num未配置，推送服务退出运行");
		}
		PRIORITY_EVENT_FLOW_NUM = Integer.parseInt(flowNum);

		String topicDelimiter = "_";//ConfigUtil.getProperty("priority.topic.delimiter");
		if (StringUtils.isEmpty(topicDelimiter)) {
			throw new RuntimeException("priority.topic.delimiter未配置，推送服务退出运行");
		}
		PRIORITY_TOPIC_DELIMITER = topicDelimiter;
	}

	/**
	 * Creates one {@link KafkaConsumer} per priority level and subscribes each
	 * to its priority-suffixed topic
	 * ({@code PRIORITY_MESSAGE_TOPIC + delimiter + n}).
	 *
	 * @param groupId base consumer-group id; each priority level gets its own
	 *                group ({@code groupId + delimiter + n})
	 * @param PRIORITY_MESSAGE_TOPIC base topic name without the priority suffix
	 */
	public void init(String groupId, String PRIORITY_MESSAGE_TOPIC) {
		Properties props = new Properties();
		props.put("bootstrap.servers", "192.168.2.232:9092,192.168.2.233:9092,192.168.2.234:9092");
		props.put("key.deserializer", StringDeserializer.class.getName());
		props.put("value.deserializer", StringDeserializer.class.getName());
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
		props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "500");
		for (int i = 1; i <= PRIORITY_EVENT_FLOW_NUM; i++) {
			String tempGroupId = groupId + PRIORITY_TOPIC_DELIMITER + i;
			props.put(ConsumerConfig.GROUP_ID_CONFIG, tempGroupId);

			String topic = PRIORITY_MESSAGE_TOPIC + PRIORITY_TOPIC_DELIMITER + i;
			KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
			// BUG FIX: the original built the topic name but never subscribed
			// to it, so every subsequent poll() returned nothing.
			consumer.subscribe(Collections.singletonList(topic));
			consumers.put(i, consumer);
			logger.info("消费组:" + tempGroupId +";消费topic:"+ topic + ";i=" + i);
		}
	}

	/**
	 * Polls each priority's consumer, from highest (1) to lowest, and returns
	 * the record values collected in this cycle. If a higher-priority topic
	 * yields more than {@code PRIORITY_LIMIT_COUNT} records, the remaining
	 * lower-priority topics are skipped for this cycle. When nothing was
	 * consumed, sleeps 500 ms so an idle caller loops about twice per second.
	 *
	 * @return the record values consumed this cycle; never {@code null}
	 *         (empty when idle)
	 */
	public List<String> poll() {
		// BUG FIX: the original declared pollData but never populated it and
		// always returned null; record values were logged and then dropped.
		List<String> pollData = new ArrayList<String>();
		boolean sleepFlag = true;
		for (int j = 1; j <= PRIORITY_EVENT_FLOW_NUM; j++) {
			ConsumerRecords<String, String> records;
			try {
				records = consumers.get(j).poll(0);
			} catch (WakeupException e) {
				// Raised by shutdown(); stop polling the remaining consumers.
				// BUG FIX: the original fell through with records == null and
				// then hit an NPE on records.count() below.
				break;
			}

			for (ConsumerRecord<String, String> record : records) {
				Map<String, Object> data = new HashMap<String, Object>();
				data.put("partition", record.partition());
				data.put("offset", record.offset());
				data.put("value", record.value());
				data.put("topic", record.topic());
				logger.info("" + data);
				pollData.add(record.value());
			}

			if (records.count() > PRIORITY_LIMIT_COUNT) {
				sleepFlag = false;
				// 高优先级的消费到1000条就返回，少于1000条，带上一部分低优先级数据
				logger.info("消费个数：" + records.count());
				break;
			} else if (records.count() > 0) {
				// 数据量少的情况下，一次循环消费的数量
				sleepFlag = false;
				logger.info("消费个数：" + records.count());
			}
		}
		// 空闲时间一秒访问两次 — idle backoff so the caller loops ~2x/second.
		if (sleepFlag) {
			try {
				TimeUnit.MILLISECONDS.sleep(500);
			} catch (InterruptedException e) {
				// Restore the interrupt flag instead of swallowing it so the
				// owning thread can observe the shutdown request.
				Thread.currentThread().interrupt();
			}
		}

		return pollData;
	}

	/**
	 * Signals every consumer to abort a blocking poll. Safe to call from any
	 * thread: wakeup() is the only thread-safe KafkaConsumer method. The
	 * polling thread itself should close the consumers once it has stopped.
	 */
	public void shutdown() {
		for (KafkaConsumer<String, String> consumer : consumers.values()) {
			consumer.wakeup();
		}
	}
}