package com.bj58.ecdata.monitor.monitor;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

import javax.annotation.PostConstruct;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.bj58.ecdata.monitor.service.SMS;
import com.bj58.ecdata.util.JsonUtils;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;

//@Component
public class Kafka2Monitor {

	private static final Logger log = LoggerFactory.getLogger(Kafka2Monitor.class);

	/** Consumer groups whose produce/consume progress is monitored. */
	private final Set<String> check_groups = new HashSet<String>();

	/** Curator client connected to the Kafka chroot in ZooKeeper. */
	private CuratorFramework client;

	/** topic_partition -> info from the previous round; null until the first round completes. */
	private Map<String, PartitionInfo> topic_partition_logSize_last;

	/** group_partition -> info from the previous round; null until the first round completes. */
	private Map<String, PartitionInfo> group_partition_offset_last;

	/** Broker host name (as registered in ZooKeeper) -> IP reachable from this machine. */
	private ImmutableMap<String, String> host_ip_map;

	/**
	 * Registers the monitored groups, connects to ZooKeeper and starts a
	 * background thread that runs one monitoring round every two minutes.
	 */
	@PostConstruct
	public void init() {
		check_groups.add("display_group");
		check_groups.add("click_group");
		check_groups.add("track_pc_group");
		check_groups.add("track_m_group");
		check_groups.add("track_app_group");
		check_groups.add("resume_add_group");
		check_groups.add("imc_info_add_group");
		host_ip_map = ImmutableMap.of("spark01", "10.5.20.100", "spark03", "10.5.20.18", "spark09", "10.9.20.31");

		CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
		CuratorFramework curator = builder.connectString("10.5.20.18:2181,10.5.20.100:2181,10.9.20.31:2181/opt/kafka").sessionTimeoutMs(10000)
				.connectionTimeoutMs(5000).canBeReadOnly(true).retryPolicy(new ExponentialBackoffRetry(1000, Integer.MAX_VALUE)).defaultData(null)
				.build();
		curator.start();
		this.client = curator;

		new Thread(new Runnable() {
			@Override
			public void run() {
				while (true) {
					try {
						Thread.sleep(1000 * 60 * 2); // poll every 2 minutes
						process();
					} catch (InterruptedException ie) {
						// Restore the interrupt flag and stop the monitor loop.
						Thread.currentThread().interrupt();
						return;
					} catch (Exception e) {
						log.error("Kafka2Monitor process error!", e);
					}
				}
			}
		}, "Kafka2Monitor").start();
	}

	/**
	 * One monitoring round: reads the current consumer offset and broker
	 * logSize for every partition of each checked group (offsets come from
	 * ZooKeeper, logSizes from the brokers via SimpleConsumer), then compares
	 * against the previous round's snapshot and sends an SMS alert for every
	 * topic partition that stopped producing and every group partition that
	 * stopped consuming.
	 */
	private void process() throws Exception {
		String consumer_path = "/consumers_online";
		List<String> groups = client.getChildren().forPath(consumer_path);

		Map<String, PartitionInfo> topic_partition_logSize = new HashMap<String, PartitionInfo>();
		Map<String, PartitionInfo> group_partition_offset = new HashMap<String, PartitionInfo>();

		for (String group : groups) {
			if (!check_groups.contains(group)) {
				continue;
			}
			Multimap<Broker, PartitionInfo> map = HashMultimap.create();
			// Each monitored group consumes exactly one topic (the first child node).
			String topic = client.getChildren().forPath(consumer_path + "/" + group).get(0);
			List<String> partitions = client.getChildren().forPath(consumer_path + "/" + group + "/" + topic);
			for (String partition : partitions) {
				// Node data is a JSON-serialized PartitionInfo (offset, partition, broker).
				String str = new String(client.getData().forPath(consumer_path + "/" + group + "/" + topic + "/" + partition), StandardCharsets.UTF_8);
				PartitionInfo pi = JsonUtils.fromJSON(str, PartitionInfo.class);
				pi.setTopic(topic);
				pi.setGroup(group);
				map.put(pi.getBroker(), pi);
			}

			// Ask each broker for the latest offset (logSize) of its partitions.
			for (Broker broker : map.keySet()) {
				String host = broker.getHost();
				int port = broker.getPort();
				List<PartitionInfo> piList = new ArrayList<PartitionInfo>(map.get(broker));
				Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Maps.newHashMap();
				for (PartitionInfo pi : piList) {
					// -1 == latest time; fetch a single (the newest) offset per partition.
					requestInfo.put(new TopicAndPartition(topic, pi.getPartition()), new PartitionOffsetRequestInfo(-1, 1));
				}

				OffsetRequest request = new OffsetRequest(requestInfo, (short) 0, "");
				SimpleConsumer consumer = null;
				try {
					consumer = new SimpleConsumer(host_ip_map.get(host), port, 3000, 1024, topic);
					OffsetResponse response = consumer.getOffsetsBefore(request);
					for (PartitionInfo pi : piList) {
						long[] offsets = response.offsets(topic, pi.getPartition());
						long logSize = offsets[0];
						pi.setLogSize(logSize);
						pi.setLag(logSize - pi.getOffset());
					}
				} finally {
					if (consumer != null)
						consumer.close();
				}
			}

			for (PartitionInfo pi : map.values()) {
				topic_partition_logSize.put(pi.getTopic() + "_" + pi.getPartition(), pi);
				group_partition_offset.put(pi.getGroup() + "_" + pi.getPartition(), pi);
			}
		}

		// BUG FIX: these comparisons used to run INSIDE the group loop above (a
		// misplaced closing brace). After the first group the "last" snapshot
		// was assigned the very map still being built, so every entry compared
		// equal to itself and fired a false SMS alert each round. They now run
		// exactly once per round, against the previous round's snapshot.

		if (null != topic_partition_logSize_last) {
			for (String topic_partition : topic_partition_logSize.keySet()) {
				PartitionInfo last = topic_partition_logSize_last.get(topic_partition);
				if (last == null) {
					continue; // partition first seen this round; nothing to compare yet
				}
				long last_logSize = last.getLogSize();
				long logSize = topic_partition_logSize.get(topic_partition).getLogSize();
				log.info(topic_partition + " last_logSize : " + last_logSize + ", logSize : " + logSize);
				if (logSize <= last_logSize) {
					String message = "Kafka topic_partition : %s, produce data error! last logSize : %d, logSize : %d";
					message = String.format(message, topic_partition, last_logSize, logSize);
					SMS.defaultSend(message);
				}
			}
		}
		topic_partition_logSize_last = topic_partition_logSize;

		if (null != group_partition_offset_last) {
			for (String group_partition : group_partition_offset.keySet()) {
				PartitionInfo last = group_partition_offset_last.get(group_partition);
				if (last == null) {
					continue; // partition first seen this round; nothing to compare yet
				}
				long last_offset = last.getOffset();
				long offset = group_partition_offset.get(group_partition).getOffset();
				log.info(group_partition + " last_offset : " + last_offset + ", offset : " + offset);
				if (offset <= last_offset) {
					String message = "Kafka group_partition : %s, consume data error! last offset : %d, offset : %d";
					message = String.format(message, group_partition, last_offset, offset);
					SMS.defaultSend(message);
				}
			}
		}
		group_partition_offset_last = group_partition_offset;
	}

	/**
	 * Manual smoke test: runs one monitoring round and dumps both snapshots.
	 *
	 * @param args unused
	 * @throws Exception on any ZooKeeper/Kafka failure
	 */
	public static void main(String[] args) throws Exception {
		Kafka2Monitor km = new Kafka2Monitor();
		km.init();
		km.process();
		System.out.println(km.group_partition_offset_last);
		System.out.println(km.topic_partition_logSize_last);
	}

}


class Broker{
	private String host;
	private int port;
	
	@Override
	public int hashCode() {
		return host.hashCode()+port;
	}
	
	@Override
	public boolean equals(Object obj) {
		if (this == obj) return true;
		if (obj == null) return false;
		if (getClass() != obj.getClass()) return false;
		Broker other = (Broker) obj;
		if (host == null) {
			if (other.host != null) return false;
		} else if (!host.equals(other.host)) return false;
		if (port != other.port) return false;
		return true;
	}
	
	public String getHost() {
		return host;
	}
	public void setHost(String host) {
		this.host = host;
	}
	public int getPort() {
		return port;
	}
	public void setPort(int port) {
		this.port = port;
	}
	@Override
	public String toString() {
		return "Broker [" + (host != null ? "host=" + host + ", " : "")
		+ "port=" + port + "]";
	}
	
}

/**
 * Snapshot of one partition's state for a consumer group: the group's
 * committed offset (read from ZooKeeper), the broker-side logSize (latest
 * offset, filled in later), and the resulting lag. Deserialized from JSON,
 * so setters are required for every field.
 */
class PartitionInfo{
	private String topic;
	private String group;
	private long offset;
	private long logSize;
	private long lag;
	private int partition;
	private Broker broker;

	public String getTopic() {
		return topic;
	}
	public void setTopic(String topic) {
		this.topic = topic;
	}
	public String getGroup() {
		return group;
	}
	public void setGroup(String group) {
		this.group = group;
	}
	public long getOffset() {
		return offset;
	}
	public void setOffset(long offset) {
		this.offset = offset;
	}
	public long getLogSize() {
		return logSize;
	}
	public void setLogSize(long logSize) {
		this.logSize = logSize;
	}
	public long getLag() {
		return lag;
	}
	public void setLag(long lag) {
		this.lag = lag;
	}
	public int getPartition() {
		return partition;
	}
	public void setPartition(int partition) {
		this.partition = partition;
	}
	public Broker getBroker() {
		return broker;
	}
	public void setBroker(Broker broker) {
		this.broker = broker;
	}

	@Override
	public String toString() {
		StringBuilder sb = new StringBuilder("PartitionInfo [");
		if (topic != null) {
			sb.append("topic=").append(topic).append(", ");
		}
		if (group != null) {
			sb.append("group=").append(group).append(", ");
		}
		sb.append("offset=").append(offset);
		sb.append(", logSize=").append(logSize);
		sb.append(", lag=").append(lag);
		sb.append(", partition=").append(partition).append(", ");
		if (broker != null) {
			sb.append("broker=").append(broker);
		}
		sb.append("]");
		return sb.toString();
	}

}