package cn.lsh.kafka.util;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryUntilElapsed;
import org.apache.spark.streaming.kafka.OffsetRange;
import scala.Int;

import java.util.*;

public class OffsetUtil {

	/**
	 * Shared JSON mapper used to (de)serialize offsets stored in ZooKeeper.
	 * ObjectMapper is thread-safe and expensive to construct, so it is cached
	 * instead of being created on every call.
	 */
	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	/**
	 * Fetches the partition metadata of the given topic from the Kafka cluster.
	 *
	 * @param brokerServer comma-separated Kafka broker list, each entry "host:port"
	 * @param topic        topic name
	 * @return partition id -> partition metadata, sorted by partition id
	 */
	public TreeMap<Integer, PartitionMetadata> getTopicPartitionInfo(String brokerServer, String topic) {
		TreeMap<Integer, PartitionMetadata> partitionInfo = new TreeMap<>();
		for (String broker : brokerServer.split(",")) {
			String[] hostPort = broker.split(":");
			String host = hostPort[0];
			int port = Integer.parseInt(hostPort[1]);
			SimpleConsumer consumer = new SimpleConsumer(host, port, 64 * 10000, 1024, "consumer_" + System.currentTimeMillis());
			try {
				TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(Collections.singletonList(topic));
				TopicMetadataResponse topicResponse = consumer.send(topicMetadataRequest);
				for (TopicMetadata topicMetadata : topicResponse.topicsMetadata()) {
					for (PartitionMetadata partition : topicMetadata.partitionsMetadata()) {
						partitionInfo.put(partition.partitionId(), partition);
					}
				}
			} finally {
				// Close even when send() throws, so the broker connection is not leaked.
				consumer.close();
			}
		}
		return partitionInfo;
	}

	/**
	 * Queries the current latest produced offset of every partition of the topic.
	 *
	 * @param brokerServer comma-separated Kafka broker list, each entry "host:port"
	 * @param topic        topic name
	 * @return topic/partition -> latest offset (only partitions with a live leader
	 *         and an error-free response are included)
	 */
	public static Map<TopicAndPartition, Long> getTopicOffsetsByProducer(String brokerServer, String topic) {
		Map<TopicAndPartition, Long> topicOffsets = new HashMap<>();
		// Offsets for different partitions live on different brokers (the partition
		// leaders), so every broker in the list is queried.
		for (String broker : brokerServer.split(",")) {
			String[] hostPort = broker.split(":");
			String host = hostPort[0];
			int port = Integer.parseInt(hostPort[1]);
			SimpleConsumer simpleConsumer = new SimpleConsumer(host, port, 64 * 10000, 1024, "consumer_" + System.currentTimeMillis());
			try {
				// Look up the topic's partition layout first.
				TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(Collections.singletonList(topic));
				TopicMetadataResponse topicResponse = simpleConsumer.send(topicMetadataRequest);
				for (TopicMetadata topicMetadata : topicResponse.topicsMetadata()) {
					for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
						Broker leader = partitionMetadata.leader();
						if (leader == null) {
							// No leader -> partition is unavailable; skip it.
							continue;
						}
						TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionMetadata.partitionId());

						// maxNumOffsets = 1 returns only the latest offset; 2 returns
						// both the latest and the earliest offset. offsets[0] below is
						// therefore the latest one.
						PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 2);
						OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
								kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
						// Fetch the current offsets for this partition.
						OffsetResponse offsetResponse = simpleConsumer.getOffsetsBefore(offsetRequest);
						if (!offsetResponse.hasError()) {
							long[] offsets = offsetResponse.offsets(topic, partitionMetadata.partitionId());
							topicOffsets.put(topicAndPartition, offsets[0]);
						}
					}
				}
			} finally {
				// Close even when a request throws, so the broker connection is not leaked.
				simpleConsumer.close();
			}
		}
		return topicOffsets;
	}

	/**
	 * Reads the consumer group's committed offsets for the topic from ZooKeeper.
	 *
	 * @param zookeeper     ZooKeeper connect string
	 * @param consumerGroup consumer group id
	 * @param topic         topic name
	 * @return topic/partition -> committed offset; empty if the node does not exist
	 *         or an error occurs (errors are printed, best-effort semantics)
	 */
	public static Map<TopicAndPartition, Long> getTopicOffsetsByConsumerForZk(String zookeeper, String consumerGroup, String topic) {
		Map<TopicAndPartition, Long> topicOffsets = new HashMap<>();
		// Default Kafka zk offset layout: /consumers/{group_name}/offsets/{topic}/{partition}
		String nodePath = "/consumers/" + consumerGroup + "/offsets/" + topic;
		CuratorFramework curatorFramework = curatorFramework(zookeeper);
		curatorFramework.start();
		try {
			if (curatorFramework.checkExists().forPath(nodePath) != null) {
				// Each child node is named after a partition id and stores that
				// partition's committed offset as its payload.
				List<String> partitionNodes = curatorFramework.getChildren().forPath(nodePath);
				for (String node : partitionNodes) {
					int partition = Integer.parseInt(node);
					byte[] bytes = curatorFramework.getData().forPath(nodePath + "/" + node);
					Long offset = OBJECT_MAPPER.readValue(bytes, Long.class);
					topicOffsets.put(new TopicAndPartition(topic, partition), offset);
				}
			}
		} catch (Exception e) {
			// Best-effort read: callers receive whatever was collected so far.
			e.printStackTrace();
		} finally {
			curatorFramework.close();
		}
		return topicOffsets;
	}

	/**
	 * Builds (but does not start) a Curator client for the given connect string.
	 *
	 * @param zookeeper ZooKeeper connect string
	 * @return an unstarted {@link CuratorFramework}; the caller must start and close it
	 */
	private static CuratorFramework curatorFramework(String zookeeper) {
		return CuratorFrameworkFactory.builder().connectString(zookeeper).connectionTimeoutMs(1000)
				.sessionTimeoutMs(10000).retryPolicy(new RetryUntilElapsed(1000, 1000)).build();
	}

	/**
	 * Persists the given offset ranges to ZooKeeper under the Kafka-style path
	 * {@code /consumers/{group}/offsets/{topic}/{partition}}, storing each
	 * range's untilOffset as the node payload.
	 *
	 * @param zookeeper     ZooKeeper connect string
	 * @param consumerGroup consumer group id
	 * @param offsetRanges  offset ranges to persist
	 */
	public static void saveTopicOffsetsToZk(String zookeeper, String consumerGroup, OffsetRange[] offsetRanges) {
		CuratorFramework curatorFramework = curatorFramework(zookeeper);
		curatorFramework.start();
		try {
			for (OffsetRange offsetRange : offsetRanges) {
				String topic = offsetRange.topic();
				int partition = offsetRange.partition();
				long fromOffset = offsetRange.fromOffset();
				long untilOffset = offsetRange.untilOffset();
				String nodePath = "/consumers/" + consumerGroup + "/offsets/" + topic + "/" + partition;
				System.out.println("nodePath：" + nodePath);
				System.out.println("topic=" + topic + ", partition=" + partition + ", fromOffset=" + fromOffset + ", untilOffset=" + untilOffset);
				try {
					byte[] data = OBJECT_MAPPER.writeValueAsBytes(untilOffset);
					if (curatorFramework.checkExists().forPath(nodePath) != null) {
						curatorFramework.setData().forPath(nodePath, data);
					} else {
						// creatingParentsIfNeeded() creates every missing ancestor
						// (/consumers, /consumers/{group}, .../offsets, .../{topic}).
						// The previous hand-rolled parent handling could create the
						// group node but skip the topic node, so the child create
						// then failed with NoNodeException.
						curatorFramework.create().creatingParentsIfNeeded().forPath(nodePath, data);
					}
				} catch (Exception e) {
					// Best-effort save: a failure on one partition does not block the rest.
					e.printStackTrace();
				}
			}
		} finally {
			// Always release the zk connection, even if iteration itself throws.
			curatorFramework.close();
		}
	}

	/**
	 * Ad-hoc demo entry point: prints the latest produced offsets of topic
	 * "stream_2" on a hard-coded broker list.
	 */
	public static void main(String[] args) throws Exception {
		Map<TopicAndPartition, Long> stream_2 = getTopicOffsetsByProducer("node00:9092,node01:9092,node02:9092", "stream_2");
		stream_2.forEach((k, v) -> System.out.println(k.topic() + "  " + k.partition() + "  " + v));
		/*Map<TopicAndPartition, Long> stream_1 = getTopicOffsetsByConsumerForZk("node01:2181,node02:2181,node03:2181", "spark-streaimg-kafka", "stream_1");
		stream_1.forEach((k, v) -> System.out.println(k.topic() + "  " + k.partition() + "  " + v));*/
	}
}
