package com.huatai.datacenter.service.impl;

import com.huatai.datacenter.constant.Constants;
import com.huatai.datacenter.entity.messagequeuemonitor.*;
import com.huatai.datacenter.service.ClusterService;
import com.huatai.datacenter.service.KafkaAdminService;
import com.huatai.datacenter.service.KafkaMonitorService;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.*;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

/**
 * 最新版消息队列监控服务（Kafka）
 * 思路： listAllConsumeStatKafka为入口
 * 空间换时间，预处理，存map
 * 1. 获取所有topic
 * 2. 遍历一个topic对应的所有group，没有group的topic单独处理
 * 3. 获取该topic该group的所有分区，然后统计
 *
 * @author lizhong
 */
@Slf4j
@Service
public class KafkaMonitorServiceImpl implements KafkaMonitorService {
	/**
	 * Admin client for the cluster most recently passed to {@link #init(String)};
	 * {@code null} when that cluster reported no brokers.
	 */
	private AdminClient adminClient;

	@Autowired
	private ClusterService clusterService;

	@Autowired
	private KafkaAdminService kafkaAdminService;

	/** Kafka bootstrap servers of the cluster selected by {@link #init(String)}; never null, "" when unknown. */
	private String bootstrapServers = "";

	/**
	 * key: "topic/partition/group" ("topic/partition/" for topics without a consumer group)
	 * value: offset state of that partition.
	 * Synchronized wrapper: worker threads in {@link #multiThreadProcessTopicList(List)} write concurrently.
	 */
	public Map<String/*topic/partition/group*/, QueueStatInfo> topicPartitionGroupToQueueStat =
		Collections.synchronizedMap(new HashMap<>());

	/**
	 * key: "topic/partition" ("/" separator avoids collisions such as "a"+12 vs "a1"+2)
	 * value: broker address list for that partition.
	 * Synchronized wrapper: written concurrently by worker threads.
	 * NOTE(review): field name keeps its original non-camelCase spelling for package compatibility.
	 */
	Map<String/*topic/partition*/, String/*brokerIP*/> TopicPartitionToBrokerIP =
		Collections.synchronizedMap(new HashMap<>());

	/** All consumer groups of the current cluster, refreshed by {@link #listAllConsumeStatKafka(String)}. */
	Set<String> allGroups = new HashSet<>();

	/** group -> all topic-partitions that group has committed offsets for. Populated before worker threads start. */
	Map<String/*group*/, Set<TopicPartition>> groupToTopicPartition = new HashMap<>();

	/** topic -> all consumer groups of that topic. Populated before worker threads start, read-only afterwards. */
	Map<String/*topic*/, Set<String>/*groupList*/> topicToGroupList = new HashMap<>();

	/** clusterID -> cached lag statistics, filled by a scheduled job. Synchronized for cross-thread access. */
	public Map<String/*clusterID*/, List<LagStatVO>> clusterIDToLagStatCache =
		Collections.synchronizedMap(new HashMap<>());

	/**
	 * Returns the cached lag statistics of a cluster.
	 *
	 * @param clusterID cluster id
	 * @return cached list, or an empty list when nothing is cached yet (never null)
	 */
	public List<LagStatVO> getLagStatVOListCache(String clusterID) {
		return clusterIDToLagStatCache.getOrDefault(clusterID, new ArrayList<>());
	}

	/**
	 * Stores the lag-statistics cache of a cluster.
	 *
	 * @param clusterID          cluster id
	 * @param lagStatVOListCache statistics to cache
	 */
	public void setLagStatVOListCache(String clusterID, List<LagStatVO> lagStatVOListCache) {
		this.clusterIDToLagStatCache.put(clusterID, lagStatVOListCache);
	}


	/**
	 * Initializes {@link #bootstrapServers} and {@link #adminClient} for the given cluster.
	 * When the cluster has no broker addresses, {@code adminClient} is set to null.
	 *
	 * @param clusterID cluster id
	 * @author lizhong
	 */
	public void init(String clusterID) {
		String brokers = clusterService.getBrokers(clusterID);
		// Guard against a null return — the original NPE'd on bootstrapServers.equals("").
		this.bootstrapServers = (brokers == null) ? "" : brokers;
		this.adminClient = bootstrapServers.isEmpty()
			? null
			: kafkaAdminService.getAdminClientByClusterID(clusterID);
	}


	/**
	 * Lists all topics (including internal ones) of the given cluster.
	 *
	 * @param clusterID cluster id
	 * @return names of all topics
	 */
	public Set<String> getAllTopic(String clusterID) {
		AdminClient tempAdminClient = kafkaAdminService.getAdminClientByClusterID(clusterID);
		ListTopicsOptions options = new ListTopicsOptions();
		options.listInternal(true); // include internal topics such as __consumer_offsets

		ListTopicsResult topicsResult = tempAdminClient.listTopics(options);
		KafkaFuture<Set<String>> future = topicsResult.names();
		try {
			return future.get();
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt(); // preserve interrupt status
			log.error("列出所有topic错误");
			throw new RuntimeException(e);
		} catch (ExecutionException e) {
			log.error("列出所有topic错误");
			throw new RuntimeException(e);
		}
	}


	/**
	 * Lists all topics (including internal ones) of the cluster selected by {@link #init(String)}.
	 *
	 * @return names of all topics
	 */
	private Set<String> getAllTopic() {
		ListTopicsOptions options = new ListTopicsOptions();
		options.listInternal(true); // include internal topics such as __consumer_offsets
		ListTopicsResult topicsResult = this.adminClient.listTopics(options);
		KafkaFuture<Set<String>> future = topicsResult.names();
		try {
			return future.get();
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt(); // preserve interrupt status
			log.error("列出所有topic错误");
			throw new RuntimeException(e);
		} catch (ExecutionException e) {
			log.error("列出所有topic错误");
			throw new RuntimeException(e);
		}
	}


	/**
	 * Returns the broker placement of every topic-partition in the cluster
	 * (internal __consumer_offsets topic excluded).
	 *
	 * @param clusterID cluster id
	 * @return page of {@code TopicPartitionState} entries
	 */
	public PageResult getAllTopicPartitionState(String clusterID) {
		AdminClient tempAdminClient = kafkaAdminService.getAdminClientByClusterID(clusterID);

		Set<String> allTopic = getAllTopic(clusterID);
		allTopic.remove("__consumer_offsets");

		List<TopicPartitionState> topicPartitionStateList = new ArrayList<>();

		// One batched describeTopics call instead of one round-trip per topic.
		Map<String, KafkaFuture<TopicDescription>> descriptionFutures =
			tempAdminClient.describeTopics(allTopic).values();
		for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : descriptionFutures.entrySet()) {
			TopicDescription topicDescription;
			try {
				topicDescription = entry.getValue().get();
			} catch (InterruptedException e) {
				Thread.currentThread().interrupt(); // preserve interrupt status
				log.error("获取一个主题的分区broker情况出错, getAllTopicState()");
				throw new RuntimeException(e);
			} catch (ExecutionException e) {
				log.error("获取一个主题的分区broker情况出错, getAllTopicState()");
				throw new RuntimeException(e);
			}
			// One state entry per partition of this topic.
			for (TopicPartitionInfo partition : topicDescription.partitions()) {
				topicPartitionStateList.add(
					new TopicPartitionState(entry.getKey(), partition.partition(), getBrokerIP(partition)));
			}
		}
		return new PageResult(topicPartitionStateList.size(), topicPartitionStateList);
	}


	/**
	 * Pre-processing: loads all consumer groups and fills {@link #groupToTopicPartition}
	 * and {@link #topicToGroupList}. Trades memory for time; the maps may become stale
	 * (FIXME noted by the original author).
	 *
	 * @return all consumer-group ids
	 * @author lizhong
	 * @date 2023-08-09
	 */
	private Set<String> getAllGroup() {
		Set<String> groups;
		try {
			groups = this.adminClient.listConsumerGroups().all().get(30, TimeUnit.SECONDS).stream()
				.map(ConsumerGroupListing::groupId).collect(Collectors.toSet());
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt(); // preserve interrupt status
			log.error("获取所有主题失败，中断");
			throw new RuntimeException(e);
		} catch (ExecutionException e) {
			log.error("获取所有主题失败，运行错误");
			throw new RuntimeException(e);
		} catch (TimeoutException e) {
			log.error("获取所有主题失败，超时");
			throw new RuntimeException(e);
		}

		for (String group : groups) {
			Set<TopicPartition> topicPartitions;
			try {
				topicPartitions = this.adminClient.listConsumerGroupOffsets(group)
					.partitionsToOffsetAndMetadata().get().keySet();
			} catch (InterruptedException e) {
				Thread.currentThread().interrupt(); // preserve interrupt status
				throw new RuntimeException(e);
			} catch (ExecutionException e) {
				throw new RuntimeException(e);
			}
			// Cache: group -> its topic-partitions.
			groupToTopicPartition.put(group, topicPartitions);

			// Cache: topic -> groups consuming it.
			for (TopicPartition topicPartition : topicPartitions) {
				topicToGroupList.computeIfAbsent(topicPartition.topic(), k -> new HashSet<>()).add(group);
			}
		}
		return groups;
	}


	/**
	 * Collects per-partition end offsets for a topic that has NO consumer group,
	 * via a temporary consumer group. Also caches each partition's offset state
	 * in {@link #topicPartitionGroupToQueueStat}. This is a hot spot.
	 *
	 * @param topic topic name
	 * @return one {@code LagStatVO} per partition (group field is null)
	 * @date 2023-07-27 14:52
	 * @author lizhong
	 */
	private List<LagStatVO> getTopicPartitionByTopicWithoutConsumer(String topic) {
		List<LagStatVO> lagStatVOList = new ArrayList<>();

		// Temporary consumer-group properties used only to read end offsets.
		Properties props = new Properties();
		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		props.put(ConsumerConfig.GROUP_ID_CONFIG, Constants.KeyStr.TEMP_GROUP_ID);
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

		// try-with-resources: the original never closed this consumer (resource leak).
		try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
			List<PartitionInfo> partitionInfoList = consumer.partitionsFor(topic);
			consumer.assign(partitionInfoList.stream()
				.map(info -> new TopicPartition(info.topic(), info.partition()))
				.collect(Collectors.toList()));
			consumer.poll(Duration.ofMillis(0));
			Set<TopicPartition> assignedPartitionList = consumer.assignment();
			consumer.seekToEnd(assignedPartitionList); // jump to log-end offsets
			for (TopicPartition topicPartition : assignedPartitionList) {
				long latestOffset = consumer.position(topicPartition);
				String brokerIP = TopicPartitionToBrokerIP.get(topic + "/" + topicPartition.partition());
				lagStatVOList.add(new LagStatVO(topic, topicPartition.partition(), brokerIP, null, latestOffset));

				// Cache the offsets of this partition; consumerOffset is 0 because no group consumes it.
				String key = topic + "/" + topicPartition.partition() + "/";
				topicPartitionGroupToQueueStat.put(key,
					new QueueStatInfo(topic, topicPartition.partition(), latestOffset, 0L));
			}
		}
		return lagStatVOList;
	}


	/**
	 * Returns the consumer groups of a topic from the pre-processed cache.
	 *
	 * @param topic topic name
	 * @return all groups consuming the topic; a defensive copy, so callers
	 *         (which remove the temp group id) cannot mutate the cache
	 * @author lizhong
	 */
	public Set<String> getGroupListByTopicKafka(String topic) {
		return new HashSet<>(topicToGroupList.getOrDefault(topic, Collections.emptySet()));
	}


	/**
	 * Computes the consume state of one (topic, group) pair; each entry in the result
	 * describes one partition. Also caches per-partition offsets in
	 * {@link #topicPartitionGroupToQueueStat}.
	 *
	 * @param topic   topic name
	 * @param groupID consumer-group id
	 * @return consume state of the pair, or {@code null} when the offsets query
	 *         fails or times out (callers must null-check)
	 * @author lizhong
	 */
	public TopicConsumerInfo getConsumeStatsListByTopicGroupKafka(String topic, String groupID) {
		TopicConsumerInfo nowTopicConsumerInfo = new TopicConsumerInfo(topic, groupID);

		Properties props = new Properties();
		ListConsumerGroupOffsetsResult result = adminClient.listConsumerGroupOffsets(groupID);
		try {
			// Latest committed consume offsets of the group, per partition.
			Map<TopicPartition, OffsetAndMetadata> consumedOffsets =
				result.partitionsToOffsetAndMetadata().get(10, TimeUnit.SECONDS);
			props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); // observer only — never commit
			props.put(ConsumerConfig.GROUP_ID_CONFIG, groupID);
			props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
			props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
			props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
			// Observe the consume state through a short-lived consumer.
			try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
				// Log-end offsets (latest produced) for the partitions the group consumes.
				Map<TopicPartition, Long> endOffsets = consumer.endOffsets(consumedOffsets.keySet());

				List<LagStatVO> lagStatVOList = new ArrayList<>();

				for (Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) {
					TopicPartition topicPartition = entry.getKey();
					if (!topic.equals(topicPartition.topic())) {
						continue; // the group may also consume other topics — keep only the requested one
					}
					int partition = topicPartition.partition();
					long logEndOffset = entry.getValue(); // latest produced offset
					long offset = consumedOffsets.get(topicPartition).offset(); // latest consumed offset
					long lag = logEndOffset - offset;

					QueueStatInfo queueStatInfo = new QueueStatInfo(
						topic, groupID, partition, logEndOffset, offset, lag);

					// Cache for later lag-record fetching.
					topicPartitionGroupToQueueStat.put(topic + "/" + partition + "/" + groupID, queueStatInfo);

					String brokerIP = TopicPartitionToBrokerIP.get(topic + "/" + partition);
					lagStatVOList.add(new LagStatVO(queueStatInfo.getTopic(), queueStatInfo.getQueueId(),
						brokerIP, queueStatInfo.getGroup(), queueStatInfo.getLag()));
				}
				nowTopicConsumerInfo.setLagStatVOList(lagStatVOList);
				return nowTopicConsumerInfo;
			}
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt();
			log.error("获取消费组的lag被中断: {}", groupID, e);
			return null;
		} catch (ExecutionException e) {
			log.error("获取消费组的lag失败: {}", groupID, e);
			return null;
		} catch (TimeoutException e) {
			log.error("获取消费组的lag超时: {}", groupID, e);
			return null;
		}
	}

	/**
	 * Fills {@link #TopicPartitionToBrokerIP} for every partition of the given topic.
	 *
	 * @param topic topic name
	 * @author lizhong
	 */
	private void getBrokerByTopicPartition(String topic) {
		TopicDescription topicDescription;
		try {
			topicDescription = adminClient.describeTopics(Collections.singleton(topic)).values().get(topic).get();
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt(); // preserve interrupt status
			log.error("获取一个主题的分区broker情况出错, getAllTopicState()");
			throw new RuntimeException(e);
		} catch (ExecutionException e) {
			log.error("获取一个主题的分区broker情况出错, getAllTopicState()");
			throw new RuntimeException(e);
		}
		for (TopicPartitionInfo partition : topicDescription.partitions()) {
			TopicPartitionToBrokerIP.put(topic + "/" + partition.partition(), getBrokerIP(partition));
		}
	}

	/**
	 * Computes the consume state of every group of one topic (one entry per
	 * group-partition). Topics without any group fall back to
	 * {@link #getTopicPartitionByTopicWithoutConsumer(String)}.
	 *
	 * @param topic topic name
	 * @return lag statistics of the topic across all its groups
	 * @author lizhong
	 * @date 2023-08-03 21:41
	 */
	public List<LagStatVO> getConsumeStatsListByTopicKafka(String topic) {
		getBrokerByTopicPartition(topic);
		List<LagStatVO> lagStatVOList = new ArrayList<>();

		// Groups of this topic, from the pre-processed cache (returned as a copy).
		Set<String> groupList = getGroupListByTopicKafka(topic);
		// Skip the temporary observer group.
		groupList.remove(Constants.KeyStr.TEMP_GROUP_ID);

		for (String group : groupList) {
			TopicConsumerInfo topicConsumerInfo = getConsumeStatsListByTopicGroupKafka(topic, group);
			if (topicConsumerInfo == null) {
				// Per-group query failed (timeout/execution error) — skip instead of NPE.
				continue;
			}
			lagStatVOList.addAll(topicConsumerInfo.getLagStatVOList());
		}
		// Topics with no consumer group at all.
		if (groupList.isEmpty()) {
			lagStatVOList.addAll(getTopicPartitionByTopicWithoutConsumer(topic));
		}
		return lagStatVOList;
	}


	/**
	 * Builds the "host:port, host:port, ..." broker list of a partition:
	 * the leader first, then every replica not yet listed.
	 *
	 * @param partition partition metadata
	 * @return comma-separated broker addresses; empty when the partition has no leader and no replicas
	 * @date 2023-08-09
	 * @author lizhong
	 */
	private String getBrokerIP(TopicPartitionInfo partition) {
		Set<Node> seen = new HashSet<>();
		StringJoiner brokerIP = new StringJoiner(", ");

		// Leader may be null for a leaderless partition — the original NPE'd here.
		Node leader = partition.leader();
		if (leader != null && seen.add(leader)) {
			brokerIP.add(leader.host() + ":" + leader.port());
		}

		for (Node replica : partition.replicas()) {
			if (replica != null && seen.add(replica)) {
				brokerIP.add(replica.host() + ":" + replica.port());
			}
		}
		return brokerIP.toString();
	}


	/**
	 * Fetches the lagged records of one partition between the consumed offset and
	 * the broker (log-end) offset, using a temporary consumer group.
	 *
	 * @param brokerOffset   log-end offset (exclusive upper bound, by record offset)
	 * @param consumerOffset committed consume offset (start position)
	 * @param topic          topic name
	 * @param partition      partition id
	 * @return the lagged records; possibly shorter than expected when records
	 *         were already removed by retention
	 * @author lizhong
	 */
	private List<LagRecord> getLagRecordByOffset(long brokerOffset, long consumerOffset, String topic, int partition) {
		List<LagRecord> lagRecordList = new ArrayList<>();

		// Temporary consumer-group properties used to pull the lagged records.
		Properties props = new Properties();
		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		// MUST be a group id distinct from every real consumer group.
		props.put(ConsumerConfig.GROUP_ID_CONFIG, Constants.KeyStr.TEMP_GROUP_ID);

		try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
			// Restrict to exactly this topic-partition.
			TopicPartition topicPartition = new TopicPartition(topic, partition);
			consumer.assign(Collections.singleton(topicPartition));
			consumer.seek(topicPartition, consumerOffset);

			while (consumerOffset < brokerOffset) {
				ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
				if (records.isEmpty()) {
					// Nothing returned (e.g. records expired by retention) — the original spun forever here.
					break;
				}
				long count = 0L;
				for (ConsumerRecord<String, String> record : records) {
					long lagRecordOffset = record.offset();
					if (lagRecordOffset > brokerOffset) {
						break; // past the snapshot taken when the lag was computed
					}
					// timestamp: produce time or broker-append time, in milliseconds
					lagRecordList.add(new LagRecord(lagRecordOffset, record.timestamp(), record.value()));
					count++;
				}
				consumerOffset += count;
				consumer.seek(topicPartition, consumerOffset);
			}
		} catch (Exception e) {
			// Fixed: the offset arguments were swapped relative to the "从{}到{}" placeholders.
			log.error("获取主题{}分区{}从{}到{}的堆积消息失败！", topic, partition, consumerOffset, brokerOffset, e);
		}
		return lagRecordList;
	}


	/**
	 * Returns the lagged records of one (topic, group, partition).
	 * Depends on the offset cache filled by {@link #listAllConsumeStatKafka(String)}.
	 *
	 * @param lagRecordQueryDTO wrapper of (topic, group, partition); group may be null
	 * @return page of lagged records; empty when no offsets are cached for the key
	 * @data 2023-07-27 14:55
	 * @author lizhong
	 */
	public PageResult getLagRecordListKafka(LagRecordQueryDTO lagRecordQueryDTO) {
		String topic = lagRecordQueryDTO.getTopic();
		String group = lagRecordQueryDTO.getGroup();
		int partition = lagRecordQueryDTO.getPartition();
		// Fixed: the third placeholder is the partition, not a broker.
		log.info("获取主题: {} + 消费组: {} + 分区: {} 的堆积消息...", topic, group, partition);

		// Cache key; group-less topics use a trailing "/".
		String key = (group == null) ? (topic + "/" + partition + "/") : (topic + "/" + partition + "/" + group);
		QueueStatInfo queueStatInfo = topicPartitionGroupToQueueStat.get(key);
		if (queueStatInfo == null) {
			// listAllConsumeStatKafka has not cached this key yet — the original NPE'd here.
			log.warn("未找到offset缓存, key: {}", key);
			return new PageResult(0, new ArrayList<>());
		}

		List<LagRecord> lagRecordList = getLagRecordByOffset(
			queueStatInfo.getBrokerOffset(), queueStatInfo.getConsumerOffset(), topic, partition);
		return new PageResult(lagRecordList.size(), lagRecordList);
	}


	/**
	 * Entry point: consume state of every topic, group and broker of one cluster.
	 *
	 * @param clusterID cluster id
	 * @return (topic, group, broker address, lag) entries
	 * @data 2023-07-21 16:45
	 * @author lizhong
	 */
	public PageResult listAllConsumeStatKafka(String clusterID) {
		// Resolve broker addresses and admin client for this cluster.
		init(clusterID);

		// 1. All topics, minus Kafka's internal offsets topic.
		Set<String> topicList = getAllTopic();
		topicList.remove("__consumer_offsets");

		// Pre-processing: one pass to load all groups and their topic-partitions.
		allGroups = getAllGroup();

		// Fan out topic processing across worker threads.
		List<LagStatVO> lagStatVOList = multiThreadProcessTopicList(new ArrayList<>(topicList));

		return new PageResult(lagStatVOList.size(), lagStatVOList);
	}


	/**
	 * Processes the topic list, in parallel when it is large enough.
	 * Results are accumulated in a synchronized list because worker threads
	 * append concurrently (the original raced on a plain ArrayList).
	 *
	 * @param topicList topics to process
	 * @return lag statistics of all topics
	 * @date 2023-08-03 21:36
	 * @author lizhong
	 */
	private List<LagStatVO> multiThreadProcessTopicList(List<String> topicList) {
		List<LagStatVO> lagStatVOList = Collections.synchronizedList(new ArrayList<>());
		int size = topicList.size();
		int threadNumber = 15; // worker-thread count
		log.info("主题列表个数{}，{}", size, (size >= threadNumber ? "使用多线程" : "使用单线程"));

		// Small lists are processed inline.
		if (size < threadNumber) {
			for (String topic : topicList) {
				lagStatVOList.addAll(getConsumeStatsListByTopicKafka(topic));
			}
			return lagStatVOList;
		}

		int groupNumber = size / threadNumber;
		int leftNumber = size % threadNumber;
		// One extra thread picks up the remainder topics.
		int actualThreads = threadNumber + ((leftNumber == 0) ? 0 : 1);
		// Fixed: the original string-concatenated 15 + 1 and printed "151".
		log.info("实际线程个数{}", actualThreads);

		List<Thread> threadList = new ArrayList<>(actualThreads);
		for (int i = 0; i < actualThreads; i++) {
			final int from = groupNumber * i;
			final int to = (i < threadNumber) ? groupNumber * (i + 1) : size;
			threadList.add(new Thread(() -> {
				for (int j = from; j < to; j++) {
					lagStatVOList.addAll(getConsumeStatsListByTopicKafka(topicList.get(j)));
				}
			}));
		}
		threadList.forEach(Thread::start);
		try {
			for (Thread t : threadList) {
				t.join();
			}
			log.info("all threads done!");
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt(); // preserve interrupt status
			log.error("等待主题处理线程时被中断", e);
		}
		return lagStatVOList;
	}
}
