package com.mht2017.common.utils;

import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

import com.mht2017.common.dto.KafkaBrokerDto;
import com.mht2017.common.dto.KafkaTopicStateDto;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;

import com.alibaba.fastjson.JSON;

import kafka.api.OffsetRequest;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class KafkaUtil {

	private static final Logger LOGGER = Logger.getLogger(KafkaUtil.class);
	private static final String actionName = "Kafka辅助工具";
	private static final String brokerIdsRoot = "/brokers/ids";
	private static final String brokerTopicsRoot = "/brokers/topics";
	private static final int DEFAULT_PARTITION = 0;

	private static final BrokerIdsWatcher BROKER_IDS_WATCHER = new BrokerIdsWatcher();

	// topic -> consumer connected to the leader of that topic's partition 0.
	// Plain HashMap: EVERY access (reads included) must hold the map's monitor,
	// because the batch path touches it from a parallel stream.
	private static final Map<String, SimpleConsumer> topicConsumerMap = new HashMap<>();

	// broker id -> open consumer. ConcurrentHashMap so createConsumerWithTopic
	// (called under the topicConsumerMap lock) can read it safely while
	// createOrRefreshConsumer (class lock) rebuilds it.
	private static final Map<Integer, SimpleConsumer> consumerCache = new ConcurrentHashMap<>();

	// Last broker list seen in ZooKeeper; guarded by its own monitor.
	private static final List<KafkaBrokerDto> kafkaBrokers = new LinkedList<>();

	/** Static utility class — not instantiable. */
	private KafkaUtil() {
	}

	/**
	 * @return the earliest available offset of partition 0 of {@code topic},
	 *         or {@code -1} on any error (valid offsets are {@code >= 0})
	 */
	public static long getBeginOffset(String topic) {
		return getOffset(topic, DEFAULT_PARTITION, OffsetRequest.EarliestTime());
	}

	/**
	 * @return the latest offset of partition 0 of {@code topic},
	 *         or {@code -1} on any error (valid offsets are {@code >= 0})
	 */
	public static long getEndOffset(String topic) {
		return getOffset(topic, DEFAULT_PARTITION, OffsetRequest.LatestTime());
	}

	/**
	 * Fetches a single offset for one topic/partition from its cached leader consumer.
	 *
	 * @param watchTime {@link OffsetRequest#EarliestTime()} or {@link OffsetRequest#LatestTime()}
	 * @return the offset, or {@code -1} when no consumer is available or the request fails
	 */
	private static long getOffset(String topic, int partition, long watchTime) {
		SimpleConsumer cachedConsumer = getCachedConsumer(topic);
		if (null == cachedConsumer) {
			return -1;
		}
		try {
			Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
			requestInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(watchTime, 1));
			OffsetResponse response = cachedConsumer.getOffsetsBefore(new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), cachedConsumer.clientId()));
			if (response.hasError()) {
				LOGGER.error(String.format("%s-获取Offset出错，主题: '%s', 分区: '%s': '%s'", actionName, topic, partition, ErrorMapping.exceptionFor(response.errorCode(topic, partition))));
				// Typical cause: partition leadership moved, so the cached
				// topic->broker mapping is stale; drop it and re-resolve next call.
				cleanConsumerByTopic(topic);
			} else {
				long offset = response.offsets(topic, partition)[0];
				if (LOGGER.isTraceEnabled()) {
					LOGGER.log(Level.TRACE, String.format("%s-获取offset('%s/%s'/%s='%s')", actionName, topic, partition, OffsetRequest.LatestTime() == watchTime ? "end" : "begin", offset));
				}
				return offset;
			}
		} catch (Exception e) {
			// NOTE: the label previously said "clientId" but logged the host.
			LOGGER.error(String.format("%s-获取offset出错,host: '%s',topic: '%s', partition: '%s', msg: '%s'", actionName, cachedConsumer.host(), topic, partition, e.getMessage()), e);
			createOrRefreshConsumer();
		}
		return -1;
	}

	/**
	 * Batch variant of {@link #getBeginOffset(String)} for partition 0 of each topic.
	 *
	 * @return topic -> earliest offset (topics that fail are absent), or {@code null}
	 *         when {@code topics} is null/empty or no consumer could be resolved
	 */
	public static Map<String, Long> getBeginOffset(List<String> topics) {
		return getOffset(topics, OffsetRequest.EarliestTime());
	}

	/**
	 * Batch variant of {@link #getEndOffset(String)} for partition 0 of each topic.
	 *
	 * @return topic -> latest offset (topics that fail are absent), or {@code null}
	 *         when {@code topics} is null/empty or no consumer could be resolved
	 */
	public static Map<String, Long> getEndOffset(List<String> topics) {
		return getOffset(topics, OffsetRequest.LatestTime());
	}

	/**
	 * Groups the topics by their leader consumer and queries each broker in
	 * parallel; results are merged into one concurrent map.
	 */
	private static Map<String, Long> getOffset(List<String> topics, long watchTime) {
		Map<SimpleConsumer, List<String>> consumer2Topics = getCachedConsumer(topics);
		if (MapUtils.isEmpty(consumer2Topics)) {
			return null;
		}

		// ConcurrentHashMap because the parallel stream writes from several threads.
		Map<String, Long> allTopic2Offset = new ConcurrentHashMap<>(SizeUtil.sizeZeroIfNull(topics));
		consumer2Topics.entrySet().parallelStream()
				.forEach(entry -> fetchOffsets(entry.getKey(), entry.getValue(), watchTime, allTopic2Offset));
		return allTopic2Offset;
	}

	/**
	 * Queries one broker for the partition-0 offsets of the given topics and puts
	 * them into {@code result}. On a response error nothing is stored and the
	 * failing topics' cached consumers are evicted.
	 */
	private static void fetchOffsets(SimpleConsumer simpleConsumer, List<String> topicsInConsumer, long watchTime, Map<String, Long> result) {
		if (null == simpleConsumer) {
			return;
		}
		try {
			Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
			for (String topic : topicsInConsumer) {
				requestInfo.put(new TopicAndPartition(topic, DEFAULT_PARTITION), new PartitionOffsetRequestInfo(watchTime, 1));
			}
			OffsetResponse response = simpleConsumer.getOffsetsBefore(new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId()));
			if (response.hasError()) {
				// Typical cause: partition reassignment made the cached broker stale.
				List<String> errorTopics = new LinkedList<>();
				for (String topic : topicsInConsumer) {
					// compare against NoError (0) so negative codes (UnknownCode = -1)
					// are treated as errors too; the old "> 0" check missed them
					if (response.errorCode(topic, DEFAULT_PARTITION) != ErrorMapping.NoError()) {
						LOGGER.error(String.format("%s-获取Offset出错，主题: '%s', 分区: '%s': '%s'", actionName, topic, DEFAULT_PARTITION,
								ErrorMapping.exceptionFor(response.errorCode(topic, DEFAULT_PARTITION))));
						errorTopics.add(topic);
					}
				}
				cleanConsumerByTopic(errorTopics);
			} else {
				for (String topic : topicsInConsumer) {
					long offset = response.offsets(topic, DEFAULT_PARTITION)[0];
					result.put(topic, offset);
					if (LOGGER.isTraceEnabled()) {
						LOGGER.log(Level.TRACE,
								String.format("%s-获取offset('%s/%s'/%s='%s')", actionName, topic, DEFAULT_PARTITION, OffsetRequest.LatestTime() == watchTime ? "end" : "begin", offset));
					}
				}
			}
		} catch (Exception e) {
			LOGGER.error(String.format("%s-获取offset出错,host: '%s',topic: '%s', partition: '%s', msg: '%s'", actionName, simpleConsumer.host(), topicsInConsumer, DEFAULT_PARTITION, e.getMessage()),
					e);
			createOrRefreshConsumer();
		}
	}

	/**
	 * Resolves each topic's leader consumer and groups the topics by consumer.
	 * A topic whose leader cannot be resolved is grouped under the {@code null} key.
	 */
	private static Map<SimpleConsumer, List<String>> getCachedConsumer(List<String> topics) {
		if (null == topics) {
			return null;
		}

		Map<SimpleConsumer, List<String>> result = new HashMap<>();
		for (String topic : topics) {
			SimpleConsumer cachedConsumer = getCachedConsumer(topic);
			result.computeIfAbsent(cachedConsumer, key -> new LinkedList<>()).add(topic);
		}
		return result;
	}

	/**
	 * Returns the cached consumer for {@code topic}, creating and caching one if
	 * absent. The whole lookup-or-create runs under the map's monitor: an
	 * unsynchronized HashMap read racing with writers is undefined behavior, so
	 * the old lock-free fast path was unsafe.
	 *
	 * @return the consumer, or {@code null} when the leader cannot be resolved
	 */
	private static SimpleConsumer getCachedConsumer(String topic) {
		if (null == topic) {
			return null;
		}

		synchronized (topicConsumerMap) {
			SimpleConsumer cachedConsumer = topicConsumerMap.get(topic);
			if (null == cachedConsumer) {
				cachedConsumer = createConsumerWithTopic(topic);
				topicConsumerMap.put(topic, cachedConsumer);
			}
			return cachedConsumer;
		}
	}

	/**
	 * Reads partition 0's state node from ZooKeeper to find the topic's leader
	 * broker and returns the consumer connected to that broker, refreshing the
	 * broker connections once if the leader is not in the cache.
	 *
	 * @return the leader's consumer, or {@code null} on ZooKeeper failure or
	 *         missing state data
	 */
	private static SimpleConsumer createConsumerWithTopic(String topic) {
		String path = String.format("%s/%s/partitions/0/state", brokerTopicsRoot, topic);
		try {
			if (MapUtils.isEmpty(consumerCache)) {
				createOrRefreshConsumer();
			}
			KafkaTopicStateDto dto = JSON.parseObject(ZooKeeperUtil.getData(path), KafkaTopicStateDto.class);
			if (null != dto) {
				SimpleConsumer simpleConsumer = consumerCache.get(dto.getLeader());
				if (null != simpleConsumer) {
					return simpleConsumer;
				}
				// Leader not in the cache: the broker set probably changed; refresh
				// the connections once and retry before giving up.
				createOrRefreshConsumer();
				SimpleConsumer refreshed = consumerCache.get(dto.getLeader());
				if (null != refreshed) {
					return refreshed;
				}
				throw new RuntimeException(String.format("%s-没有对应的brokerId: '%s'", actionName, dto.getLeader()));
			}
		} catch (KeeperException | InterruptedException e) {
			if (e instanceof InterruptedException) {
				// preserve the interrupt status for callers further up the stack
				Thread.currentThread().interrupt();
			}
			LOGGER.error(String.format("%s-获取消费者出错: '%s'", actionName, e.getMessage()), e);
		}
		return null;
	}

	/**
	 * @return the number of live brokers, refreshing from ZooKeeper when the
	 *         local cache is empty
	 */
	public static int getBrokerCount() {
		if (CollectionUtils.isEmpty(kafkaBrokers)) {
			refreshBrokerDtos();
		}
		synchronized (kafkaBrokers) {
			return SizeUtil.sizeZeroIfNull(kafkaBrokers);
		}
	}

	/**
	 * @return the number of partitions of {@code topic} according to ZooKeeper,
	 *         or {@code -1} when the topic is blank or the lookup fails
	 */
	public static int getTopicPartitionCount(String topic) {
		if (StringUtils.isNotEmpty(topic)) {
			try {
				return SizeUtil.size(ZooKeeperUtil.getChildren(String.format("%s/%s/partitions", brokerTopicsRoot, topic)));
			} catch (Exception e) {
				LOGGER.error(String.format("%s-获取Topic信息出错: '%s'", actionName, e.getMessage()), e);
			}
		}
		return -1;
	}

	/**
	 * (Re)connects a {@link SimpleConsumer} to every broker registered in
	 * ZooKeeper, closing the previous connections and invalidating the
	 * topic-to-consumer cache. Class-level synchronized: only one refresh at a time.
	 */
	public static synchronized void createOrRefreshConsumer() {
		try {
			List<KafkaBrokerDto> newKafkaBrokers = refreshBrokerDtos();
			if (CollectionUtils.isNotEmpty(newKafkaBrokers)) {
				Map<Integer, SimpleConsumer> consumerTmp = new HashMap<>();
				for (KafkaBrokerDto dto : newKafkaBrokers) {
					String clientId = String.format("%s-%s", KafkaUtil.class.getSimpleName(), getJvmId());
					LOGGER.info(String.format("%s-创建Kafka的broker连接: '%s:%s', clientId: '%s'", actionName, dto.getHost(), dto.getPort(), clientId));
					consumerTmp.put(dto.getId(), new SimpleConsumer(dto.getHost(), dto.getPort(), 3000, 1024, clientId));
				}
				// Close the old connections best-effort before swapping in the new ones.
				for (SimpleConsumer oldConsumer : consumerCache.values()) {
					try {
						oldConsumer.close();
					} catch (Exception ignored) {
						// a broker that already went away may fail to close cleanly
					}
				}
				consumerCache.clear();
				consumerCache.putAll(consumerTmp);
				// The topic cache may point at the consumers just closed; clear it
				// under its monitor (the old code cleared without holding the lock).
				synchronized (topicConsumerMap) {
					topicConsumerMap.clear();
				}
			} else {
				LOGGER.error(String.format("%s-创建broker连接出错：没有可用的broker", actionName));
			}
		} catch (Exception e) {
			LOGGER.error(String.format("%s-获取Kafka信息出错: '%s'", actionName, e.getMessage()), e);
		}
	}

	/**
	 * Re-reads the broker list from ZooKeeper (re-arming the children watcher)
	 * and replaces the shared cache atomically.
	 *
	 * @return a private snapshot of the broker list — never the live shared list,
	 *         so callers can iterate without risking ConcurrentModificationException
	 */
	private static List<KafkaBrokerDto> refreshBrokerDtos() {
		try {
			List<String> children = ZooKeeperUtil.getZooKeeper().getChildren(brokerIdsRoot, BROKER_IDS_WATCHER);
			if (CollectionUtils.isNotEmpty(children)) {
				List<KafkaBrokerDto> newKafkaBrokers = new ArrayList<>(SizeUtil.sizeZeroIfNull(children));
				for (String id : children) {
					KafkaBrokerDto dto = JSON.parseObject(ZooKeeperUtil.getData(String.format("%s/%s", brokerIdsRoot, id)), KafkaBrokerDto.class);
					// the znode name is the broker id; set it before publishing the dto
					dto.setId(Integer.parseInt(id));
					newKafkaBrokers.add(dto);
				}
				synchronized (kafkaBrokers) {
					kafkaBrokers.clear();
					kafkaBrokers.addAll(newKafkaBrokers);
				}
				return newKafkaBrokers;
			}
		} catch (Exception e) {
			LOGGER.error(String.format("%s-获取Kafka信息出错: '%s'", actionName, e.getMessage()), e);
		}
		// Fall back to a snapshot of whatever we knew before.
		synchronized (kafkaBrokers) {
			return new ArrayList<>(kafkaBrokers);
		}
	}

	/**
	 * @return a process identifier of the form "pid-hostname" derived from the
	 *         runtime MX bean name, or {@code null} if it cannot be determined
	 */
	private static String getJvmId() {
		try {
			return StringUtils.join(ManagementFactory.getRuntimeMXBean().getName().split("@"), '-');
		} catch (Exception e) {
			// use the logger, not printStackTrace, so the failure is captured
			LOGGER.error(String.format("%s-获取JVM标识出错: '%s'", actionName, e.getMessage()), e);
			return null;
		}
	}

	/** Evicts one topic's cached consumer so the next lookup re-resolves its leader. */
	private static void cleanConsumerByTopic(String topic) {
		synchronized (topicConsumerMap) {
			topicConsumerMap.remove(topic);
		}
	}

	/** Evicts several topics' cached consumers in one pass under the map lock. */
	private static void cleanConsumerByTopic(List<String> topics) {
		if (CollectionUtils.isNotEmpty(topics)) {
			synchronized (topicConsumerMap) {
				for (String topic : topics) {
					topicConsumerMap.remove(topic);
				}
			}
		}
	}

	/**
	 * Watches {@code /brokers/ids} for membership changes and rebuilds the broker
	 * connections when children are added or removed.
	 *
	 * @author yutong.xiao
	 * @date 2017年5月8日 下午3:03:51
	 */
	private static final class BrokerIdsWatcher implements Watcher {

		@Override
		public void process(WatchedEvent event) {
			if (null != event && event.getType() == EventType.NodeChildrenChanged && StringUtils.startsWith(event.getPath(), brokerIdsRoot)) {
				LOGGER.info(String.format("%s-观察到'%s'子节点变化, event: '%s'", actionName, brokerIdsRoot, event));
				createOrRefreshConsumer();
			}
		}
	}

	/**
	 * Lists all Kafka topics registered under {@code /brokers/topics}.
	 *
	 * @return the topic names as returned by ZooKeeper (may be {@code null} —
	 *         TODO confirm ZooKeeperUtil.getChildren's contract on failure)
	 */
	public static List<String> getAllTopic() {
		return ZooKeeperUtil.getChildren(brokerTopicsRoot);
	}

}
