package com.bj58.ecdata.monitor.monitor;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import javax.annotation.PostConstruct;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.bj58.ecdata.monitor.service.SMS;
import com.bj58.ecdata.util.JsonUtils;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;

/**
 * Kafka monitor (now managed by the data platform department).
 *
 * <p>Every two minutes it samples each online consumer group's topic: the
 * broker-side logSize per partition (via {@link SimpleConsumer}) and the
 * group's committed offset per partition (from ZooKeeper). If a partition's
 * logSize or a group's offset has not advanced since the previous sample, an
 * SMS alert is sent.
 *
 * @author zhaoxiang
 */
//@Component
public class KafkaMonitor {

	private static final Logger log = LoggerFactory.getLogger(KafkaMonitor.class);

	/** Interval between two monitoring rounds. */
	private static final long CHECK_INTERVAL_MS = 1000L * 60 * 2; // 2 minutes

	// Consumer groups that should be monitored, loaded from /consumers_online.
	private List<String> check_groups = Lists.newArrayList();

	// topic_partition -> sample from the previous round; null before the first round.
	private Map<String, CheckKafkaInfo> topic_partition_logSize_last;

	// group_partition -> sample from the previous round; null before the first round.
	private Map<String, CheckKafkaInfo> group_partition_offset_last;

	// Broker hostname -> IP; brokers register hostnames that may not resolve
	// from this machine, so we translate before opening a SimpleConsumer.
	private ImmutableMap<String, String> host_ip_map;

	private CuratorFramework client;

	/**
	 * Connects to ZooKeeper, loads the online consumer groups and starts the
	 * background monitoring thread.
	 */
	@PostConstruct
	public void init() {

		host_ip_map = ImmutableMap.of("spark01", "10.5.20.100", "spark03", "10.5.20.18", "spark09", "10.9.20.31");

		// Chrooted to /opt/kafka, so all paths below are relative to it.
		client = CuratorFrameworkFactory.builder()
				.connectString("10.5.20.18:2181,10.5.20.100:2181,10.9.20.31:2181/opt/kafka")
				.sessionTimeoutMs(10000)
				.connectionTimeoutMs(5000)
				.canBeReadOnly(true)
				.retryPolicy(new ExponentialBackoffRetry(1000, Integer.MAX_VALUE))
				.defaultData(null)
				.build();
		client.start();

		try {
			// Online consumer groups live under this zk path; add a child when a
			// group goes live, remove it (and restart) when it is retired.
			check_groups = client.getChildren().forPath("/consumers_online");
		} catch (Exception e1) {
			log.error("ERROR occurs when get consumer groups form /opt/kafka/consumers_online", e1);
		}

		Thread worker = new Thread(new Runnable() {
			@Override
			public void run() {
				while (!Thread.currentThread().isInterrupted()) {
					try {
						process();
					} catch (Exception e) {
						log.error("KafkaMonitor process error!", e);
					}
					// Sleep OUTSIDE the try around process(): a round that keeps
					// failing must not degenerate into a busy loop.
					try {
						Thread.sleep(CHECK_INTERVAL_MS);
					} catch (InterruptedException ie) {
						Thread.currentThread().interrupt(); // restore flag and exit
						return;
					}
				}
			}
		}, "kafka-monitor");
		worker.setDaemon(true); // monitoring must not keep the JVM alive on shutdown
		worker.start();

		log.info("KafkaMonitor started!");
	}

	/**
	 * One monitoring round: read broker and topic metadata plus consumer
	 * offsets, then compare against the previous round and alert via SMS when
	 * a topic's logSize or a group's offset has not advanced.
	 *
	 * @throws Exception on ZooKeeper access failures
	 */
	private void process() throws Exception {
		// All live brokers, keyed by broker id, from /brokers/ids.
		Map<String, BrokerInfo> biMap = new HashMap<String, BrokerInfo>();
		String brokersIdsPath = "/brokers/ids";
		for (String one : client.getChildren().forPath(brokersIdsPath)) {
			byte[] data = client.getData().forPath(brokersIdsPath + "/" + one);
			// Kafka stores zk payloads as UTF-8 JSON; be explicit about the charset.
			BrokerInfo bi = JsonUtils.fromJSON(new String(data, StandardCharsets.UTF_8), BrokerInfo.class);
			bi.setId(one);
			biMap.put(bi.getId(), bi);
		}

		// Every topic's partition distribution, keyed by topic, from /brokers/topics.
		Map<String, TopicInfo> tiMap = new HashMap<String, TopicInfo>();
		String topicsPath = "/brokers/topics";
		for (String one : client.getChildren().forPath(topicsPath)) {
			byte[] data = client.getData().forPath(topicsPath + "/" + one);
			TopicInfo ti = JsonUtils.fromJSON(new String(data, StandardCharsets.UTF_8), TopicInfo.class);
			ti.setTopic(one);
			tiMap.put(ti.getTopic(), ti);
		}

		String groupsPath = "/consumers";
		// All consumer groups, possibly including historical/test ones.
		List<String> groups = client.getChildren().forPath(groupsPath);

		List<CheckKafkaInfo> ckiList = new ArrayList<CheckKafkaInfo>();
		for (String group : groups) {
			// Only monitor groups declared online under /consumers_online.
			if (!check_groups.contains(group)) {
				continue;
			}
			List<String> groupTopics = client.getChildren().forPath(groupsPath + "/" + group + "/offsets");
			if (groupTopics.isEmpty()) {
				log.warn("group {} has no topics under its offsets node, skip", group);
				continue;
			}
			// NOTE(review): assumes each online group consumes exactly one topic.
			String topic = groupTopics.get(0);
			TopicInfo ti = tiMap.get(topic);
			if (ti == null) {
				log.warn("topic {} of group {} not found under {}, skip", topic, group, topicsPath);
				continue;
			}
			// <brokerId, [partitions]> mapping of the current topic.
			Multimap<String, Integer> map = ti.getBrokerId_partitions();
			for (String brokerId : map.keySet()) {
				BrokerInfo bi = biMap.get(brokerId);
				String host = bi.getHost();
				int port = bi.getPort();
				List<Integer> partitions = new ArrayList<Integer>(map.get(brokerId));
				Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Maps.newHashMap();
				// Ask each partition for its single latest offset (-1 = latest, 1 result).
				for (Integer partition : partitions) {
					requestInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(-1, 1));
				}
				OffsetRequest request = new OffsetRequest(requestInfo, (short) 0, "");
				SimpleConsumer consumer = null;
				try {
					consumer = new SimpleConsumer(host_ip_map.get(host), port, 3000, 1024, topic);
					OffsetResponse response = consumer.getOffsetsBefore(request);
					for (Integer partition : partitions) {
						long[] offsets = response.offsets(topic, partition);
						long logSize = offsets[0];
						// The group's committed offset is stored as a decimal string in zk.
						String offsetPath = groupsPath + "/" + group + "/offsets/" + topic + "/" + partition;
						byte[] data = client.getData().forPath(offsetPath);
						long offset = Long.parseLong(new String(data, StandardCharsets.UTF_8));

						CheckKafkaInfo cki = new CheckKafkaInfo();
						cki.setGroup(group);
						cki.setTopic(topic);
						cki.setPartition(partition);
						cki.setLogSize(logSize);
						cki.setOffset(offset);
						cki.setLag(logSize - offset);
						ckiList.add(cki);
					}
				} finally {
					// Always release the broker connection.
					if (consumer != null)
						consumer.close();
				}
			}
		}

		// Index the current round two ways for comparison with the last round.
		Map<String, CheckKafkaInfo> topic_partition_logSize = new HashMap<String, CheckKafkaInfo>();
		Map<String, CheckKafkaInfo> group_partition_offset = new HashMap<String, CheckKafkaInfo>();
		for (CheckKafkaInfo one : ckiList) {
			topic_partition_logSize.put(one.getTopic() + "_" + one.getPartition(), one);
			group_partition_offset.put(one.getGroup() + "_" + one.getPartition(), one);
		}

		if (null != topic_partition_logSize_last) {
			for (Entry<String, CheckKafkaInfo> e : topic_partition_logSize.entrySet()) {
				CheckKafkaInfo last = topic_partition_logSize_last.get(e.getKey());
				if (last == null) {
					continue; // partition first seen this round; nothing to compare yet
				}
				long last_logSize = last.getLogSize();
				long logSize = e.getValue().getLogSize();
				if (logSize <= last_logSize) {
					String message = "Kafka topic_partition : %s, produce data error! last logSize : %d, logSize : %d";
					log.warn("{} last_logSize : {}, logSize : {}", e.getKey(), last_logSize, logSize);
					message = String.format(message, e.getKey(), last_logSize, logSize);
					SMS.defaultSend(message);
				}
			}
		}
		topic_partition_logSize_last = topic_partition_logSize;

		if (null != group_partition_offset_last) {
			for (Entry<String, CheckKafkaInfo> e : group_partition_offset.entrySet()) {
				CheckKafkaInfo last = group_partition_offset_last.get(e.getKey());
				if (last == null) {
					continue; // group/partition first seen this round; nothing to compare yet
				}
				long last_offset = last.getOffset();
				long offset = e.getValue().getOffset();
				if (offset <= last_offset) {
					log.warn("{} last_offset : {}, offset : {}", e.getKey(), last_offset, offset);
					String message = "Kafka group_partition : %s, consume data error! last offset : %d, offset : %d";
					message = String.format(message, e.getKey(), last_offset, offset);
					SMS.defaultSend(message);
				}
			}
		}
		group_partition_offset_last = group_partition_offset;
	}

	/**
	 * One sampled (group, topic, partition) data point: broker logSize, group
	 * offset and the resulting lag. Static: it never touches the outer instance.
	 */
	static class CheckKafkaInfo {
		private String topic;
		private String group;
		private int partition;
		private long logSize;
		private long offset;
		private long lag;

		public String getTopic() {
			return topic;
		}

		public void setTopic(String topic) {
			this.topic = topic;
		}

		public String getGroup() {
			return group;
		}

		public void setGroup(String group) {
			this.group = group;
		}

		public int getPartition() {
			return partition;
		}

		public void setPartition(int partition) {
			this.partition = partition;
		}

		public long getLogSize() {
			return logSize;
		}

		public void setLogSize(long logSize) {
			this.logSize = logSize;
		}

		public long getOffset() {
			return offset;
		}

		public void setOffset(long offset) {
			this.offset = offset;
		}

		public long getLag() {
			return lag;
		}

		public void setLag(long lag) {
			this.lag = lag;
		}

		@Override
		public String toString() {
			return "CheckKafkaInfo [" + (topic != null ? "topic=" + topic + ", " : "") + (group != null ? "group=" + group + ", " : "") + "partition="
					+ partition + ", logSize=" + logSize + ", offset=" + offset + ", lag=" + lag + "]";
		}

	}

}

/**
 * Topic metadata deserialized from the zk node /brokers/topics/&lt;topic&gt;.
 * Besides the raw partition -&gt; replica-broker-ids map it maintains the
 * inverse brokerId -&gt; partition-ids multimap used by the monitor.
 */
class TopicInfo {

	private String topic;

	// partitionId -> broker ids holding a replica, as stored in zk.
	private Map<Integer, List<String>> partitions;

	// Inverse view: brokerId -> partition ids; derived in setPartitions().
	private Multimap<String, Integer> brokerId_partitions;

	public Map<Integer, List<String>> getPartitions() {
		return partitions;
	}

	/**
	 * Stores the partition map and (re)builds the inverse brokerId index.
	 * The index is rebuilt from scratch so repeated calls do not accumulate
	 * stale entries (the previous version only appended).
	 */
	public void setPartitions(Map<Integer, List<String>> partitions) {
		Multimap<String, Integer> inverse = HashMultimap.create();
		for (Entry<Integer, List<String>> one : partitions.entrySet()) {
			Integer pid = one.getKey();
			for (String bid : one.getValue()) {
				inverse.put(bid, pid);
			}
		}
		this.brokerId_partitions = inverse;
		this.partitions = partitions;
	}

	public Multimap<String, Integer> getBrokerId_partitions() {
		return brokerId_partitions;
	}

	public String getTopic() {
		return topic;
	}

	public void setTopic(String topic) {
		this.topic = topic;
	}

	@Override
	public String toString() {
		return "TopicInfo [" + (topic != null ? "topic=" + topic + ", " : "")
				+ (brokerId_partitions != null ? "brokerId_partitions=" + brokerId_partitions : "") + "]";
	}

}

/**
 * Broker registration info deserialized from the zk node /brokers/ids/&lt;id&gt;;
 * the id itself comes from the node name and is set separately.
 */
class BrokerInfo {

	private String id;
	private String host;
	private int port;

	public String getId() {
		return id;
	}

	public void setId(String id) {
		this.id = id;
	}

	public String getHost() {
		return host;
	}

	public void setHost(String host) {
		this.host = host;
	}

	public int getPort() {
		return port;
	}

	public void setPort(int port) {
		this.port = port;
	}

	@Override
	public String toString() {
		// null fields are omitted entirely, matching the historical format.
		StringBuilder sb = new StringBuilder("BrokerInfo [");
		if (id != null) {
			sb.append("id=").append(id).append(", ");
		}
		if (host != null) {
			sb.append("host=").append(host).append(", ");
		}
		return sb.append("port=").append(port).append("]").toString();
	}
}
