package com.huatai.datacenter.service;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.huatai.datacenter.config.BrokerConfig;
import com.huatai.datacenter.config.TopicConfig;
import com.huatai.datacenter.constant.Constants;
import com.huatai.datacenter.entity.BrokerEntity;
import com.huatai.datacenter.entity.ClusterEntity;
import com.huatai.datacenter.utils.KafkaAdmins;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.stream.Collectors;

/**
 * Service for managing Kafka topics across configured clusters: listing live
 * topic metadata, deleting topics, and reporting per-topic disk usage.
 *
 * @author Lion
 * @date 2023/3/29  10:58
 */
@Slf4j
@Service
public class KafkaManagerService {

	@Autowired
	private TopicInfoService topicInfoService;

	@Autowired
	private ClusterService clusterService;

	@Autowired
	private KafkaAdminService kafkaAdminService;

	@Autowired
	private ZKService zkService;

	/**
	 * Returns the partition ids of the given topic.
	 *
	 * @param clusterId cluster identifier
	 * @param topicName topic name
	 * @return partition id list — currently ALWAYS empty
	 */
	public List<Integer> getPartitionByTopic(String clusterId, String topicName) {
		// TODO(review): stub — never populated; implement via the admin client or remove.
		return new ArrayList<>();
	}

	/**
	 * Fetches the topic list live from the cluster, enriched with partition and
	 * replication stats plus every broker (leader and followers) hosting a
	 * replica of the topic.
	 * TODO(original author): granularity is wrong — should be per partition, not per topic.
	 *
	 * @param clusterId cluster id (parsed as an unsigned long)
	 * @return one JSON object per topic
	 */
	public JSONArray topicList(String clusterId) {
		List<ClusterEntity> clusterInfos = new ArrayList<>();
		clusterInfos.add(clusterService.selectById(Long.parseUnsignedLong(clusterId)));
		JSONArray array = new JSONArray();
		clusterInfos.forEach(cluster -> {
			try {
				KafkaAdmins kafkaAdmins = kafkaAdminService.getKafkaAdmins(cluster.getId().toString());
				Set<String> topicNames = kafkaAdmins.listTopics();
				Map<String, TopicDescription> map = kafkaAdmins.descTopics(topicNames);
				map.forEach((k, v) -> {
					int isrCount = 0;
					int replicaCount = 0;
					// Brokers already recorded for this topic (dedups leader vs. replicas).
					Set<Node> seenBrokers = new HashSet<>();
					// Comma-separated "host:port" list of those brokers.
					StringBuilder brokerIP = new StringBuilder();
					// Comma-separated broker id list, same order as brokerIP.
					StringBuilder brokerID = new StringBuilder();

					for (TopicPartitionInfo partition : v.partitions()) {
						replicaCount += partition.replicas().size();
						isrCount += partition.isr().size();

						// leader() may be null while a leader election is in flight —
						// the original dereferenced it unconditionally (NPE) and also
						// re-appended the leader for every partition without a separator.
						Node leader = partition.leader();
						if (leader != null && seenBrokers.add(leader)) {
							appendBroker(brokerIP, brokerID, leader);
						}
						for (Node replica : partition.replicas()) {
							if (seenBrokers.add(replica)) {
								appendBroker(brokerIP, brokerID, replica);
							}
						}
					}

					log.debug("topic key: {}", generatorKey(cluster.getId().toString(), k));

					JSONObject obj = new JSONObject();
					obj.put(TopicConfig.TTL, 86400000);
					obj.put(TopicConfig.FILE_SIZE, 0);
					obj.put(TopicConfig.PARTITION, v.partitions().size());
					// Guard: a topic can momentarily report zero partitions.
					obj.put(TopicConfig.REPLICATION,
							v.partitions().isEmpty() ? 0 : v.partitions().get(0).replicas().size());
					obj.put(Constants.KeyStr.CLUSTER, cluster.getClusterName());
					obj.put(BrokerConfig.TOPIC_NAME, k);
					obj.put(Constants.KeyStr.LOWER_CLUSTER_ID, cluster.getId());
					// Percentage of replicas NOT in the ISR. The original used integer
					// division (isrCount / partitionCount), which truncates to 0 or 1
					// and therefore always reported 0% or 100%.
					double underReplicated = replicaCount == 0
							? 0d
							: (1d - (double) isrCount / replicaCount) * 100;
					obj.put(TopicConfig.UNDER_REPLICATION, underReplicated);

					// Store the brokers this topic touches as plain strings.
					obj.put("brokerIP", brokerIP.toString());
					obj.put("brokerID", brokerID.toString());
					array.add(obj);
				});
			} catch (Exception e) {
				log.error("get topic info by clusterId has error", e);
			}

		});
		return array;
	}

	/** Appends one broker's "host:port" and id, comma-separating after the first entry. */
	private static void appendBroker(StringBuilder ips, StringBuilder ids, Node node) {
		if (ips.length() > 0) {
			ips.append(", ");
			ids.append(", ");
		}
		ips.append(node.host()).append(':').append(node.port());
		ids.append(node.id());
	}

	/** Builds the composite cache key {@code "<clusterId>|<topicName>"}. */
	private String generatorKey(String clusterId, String topicName) {
		return clusterId + "|" + topicName;
	}

	/**
	 * Deletes a topic from the cluster and, only if that succeeds, from the database.
	 *
	 * @param clusterId cluster id
	 * @param topicName topic to delete
	 * @return true only when both the cluster delete and the database delete succeeded
	 * @author Lion
	 */
	public boolean deleteTopic(String clusterId, String topicName) {
		try {
			KafkaAdmins admins = kafkaAdminService.getKafkaAdmins(clusterId);
			// Short-circuit: skip the DB delete when the cluster delete failed.
			return admins.delete(topicName) && topicInfoService.isDelete(topicName, clusterId);
		} catch (Exception e) {
			log.error("delete topic has error", e);
			return false;
		}
	}

	/**
	 * Fetches per-topic disk usage across all brokers of the cluster (broker ids
	 * are discovered via ZooKeeper).
	 *
	 * @param clusterId cluster id
	 * @return topic name -&gt; size; empty map when the broker version does not
	 *         support the log-dir describe API
	 * @throws Exception if the lookup fails for any other reason
	 */
	public Map<String, Long> takeTopicSize(String clusterId) throws Exception {
		try {
			List<BrokerEntity> brokerInfos = zkService.getZK(clusterId).getBrokers();
			List<Integer> brokerIds = brokerInfos.stream().map(BrokerEntity::getBid).collect(Collectors.toList());
			return kafkaAdminService.getKafkaAdmins(clusterId).getTopicDiskSizeForBroker(brokerIds);
		} catch (UnsupportedVersionException e) {
			// Old brokers lack DescribeLogDirs — deliberate best-effort: report "no
			// data" instead of failing, but leave a trace (the original swallowed this silently).
			log.warn("broker does not support log dir describe, clusterId={}", clusterId);
			return new HashMap<>(0);
		} catch (Exception e) {
			throw new Exception("get topic file size has error", e);
		}
	}
}
