package dyyx.util;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;

import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.DescribeClusterResult;
import org.apache.kafka.clients.admin.DescribeConsumerGroupsResult;
import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult;
import org.apache.kafka.clients.admin.ListConsumerGroupsOptions;
import org.apache.kafka.clients.admin.ListConsumerGroupsResult;
import org.apache.kafka.clients.admin.ListTopicsOptions;
import org.apache.kafka.clients.admin.ListTopicsResult;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.admin.TopicListing;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.DescribeLogDirsResponse.LogDirInfo;
import org.apache.kafka.common.requests.DescribeLogDirsResponse.ReplicaInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

import dyyx.comp.KafkaConfigEntryComparator;
import dyyx.comp.TopicPartitionComparator;
import dyyx.dto.ConsumerGroupDTO;
import dyyx.dto.ConsumerGroupsInfo;
import dyyx.dto.KafkaBroker;
import dyyx.dto.KafkaClient;
import dyyx.dto.KafkaClusterInfo;
import dyyx.dto.KafkaOffsetInfo;
import dyyx.dto.KafkaReplicaInfo;
import dyyx.dto.KafkaTopicReplicaSummaryInfo;
import dyyx.dto.PartitionOffsetInfo;


public abstract class KafkaUtil {
	private static final Random RAND = new Random();
	
	private static volatile KafkaClient client;

	
	public static synchronized void connect(String server)throws Exception{
		if(client!=null){
			throw new Exception("connected"); 
		}
		
		if(StringUtils.isBlank(server)){
			throw new Exception("kafka server blank"); 
		}
		
		Properties properties = new Properties();
        properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,server);
        properties.setProperty(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG,"3000");

        AdminClient adminClient = AdminClient.create(properties);
        
        DescribeClusterResult describeClusterResult = adminClient.describeCluster();
        // 避免无效地址 快速返回
        Node controller = describeClusterResult.controller().get();
    	// Collection<Node> nodes = describeClusterResult.nodes().get();
    	
    	if(controller==null) {
    		throw new Exception("controller is null");
    	}
		
		KafkaClient clienttmp = new KafkaClient(adminClient,server);
		client = clienttmp;			
	}
	
	public static synchronized KafkaClient getClient(){
		if(client==null){
			throw new RuntimeException("not connected"); 
		}
		return client;
	}
	
	public static synchronized void disconnect()throws Exception{
		if(client==null){
			throw new Exception("not connected"); 
		}
		client.adminClient.close();
		client = null;
	}
	
	
	public static synchronized String genShowOffsetGroupId(){
		
		return "show_offset_group_"+System.currentTimeMillis()+"_"+RAND.nextInt(1000);
	}
	
    public static void close(KafkaConsumer consumer){
    	if(consumer==null){
    		return;
    	}
    	try{
    		consumer.close();
    	}catch(Throwable e){
    		RunStatusUtil.logError("kafkaConsumerCloseError", e);
    	}
		
	}
    // 
    public static String getZookeeperConnect(List<ConfigEntry> list){
    	if(list==null){
    		return null;
    	}
    	// zookeeper.connect
    	for(ConfigEntry item:list){
    		if("zookeeper.connect".equals(item.name())){
    			return item.value();
    		}
    	}
    	
    	return null;	
    }
    
    public static Map<String,ConfigEntry> buildConfigMap(List<ConfigEntry> list){
    	if(list==null){
    		return null;
    	}
    	Map<String,ConfigEntry> map = new HashMap<>();
    	for(ConfigEntry item:list){
    		map.put(item.name(), item);
    	}
    	
    	return map;	
    }
    
    public static TopicDescription getTopicDesc(String topic)throws Exception{
    	if(StringUtils.isBlank(topic)){
			return null;
		}
    	List<String> topics = new ArrayList<>();
		topics.add(topic);
    	final  AdminClient adminClientTmp = getClient().adminClient;
    	Map<String, TopicDescription> map = adminClientTmp.describeTopics(topics).all().get();
    	if(map==null){
    		return null;
    	}
    	return map.get(topic);
    }

    
    
    public static List<ConfigEntry> getBrokerConfig(int brokerId)throws Exception{
    	if(brokerId<0){
    		return null;
    	}
        final  AdminClient adminClientTmp = getClient().adminClient;

    	List<ConfigResource> configResources = new ArrayList<>();
		configResources.add(new ConfigResource(ConfigResource.Type.BROKER,brokerId+""));
		// configResources.add(new ConfigResource(ConfigResource.Type.TOPIC,"topic"));

		Map<ConfigResource, Config> configs = adminClientTmp.describeConfigs(configResources).all().get();
		// System.out.println("configs="+configs);
		if(configs==null || configs.isEmpty()){
			return null;
		}
		Config config = new ArrayList<Config>(configs.values()).get(0);
		List<ConfigEntry> list = new ArrayList<>(config.entries());
			
		sortKafkaConfig(list);

    	return list;
    }
    
    public static List<ConfigEntry> getTopicConfig(String topicName)throws Exception{
		if(StringUtils.isBlank(topicName)){
			return null;
		}

    	
		final  AdminClient adminClientTmp = getClient().adminClient;

    	List<ConfigResource> configResources = new ArrayList<>();
		configResources.add(new ConfigResource(ConfigResource.Type.TOPIC,topicName));
		// configResources.add(new ConfigResource(ConfigResource.Type.TOPIC,"topic"));

		Map<ConfigResource, Config> configs = adminClientTmp.describeConfigs(configResources).all().get();
		// System.out.println("configs="+configs);
		if(configs==null || configs.isEmpty()){
			return null;
		}
		Config config = new ArrayList<Config>(configs.values()).get(0);
		List<ConfigEntry> list = new ArrayList<>(config.entries());
			
		sortKafkaConfig(list);
    	return list;    
    }
    	
    private static void sortKafkaConfig(List<ConfigEntry> list){
    	if(list==null || list.isEmpty()){
    		return;
    	}
    	KafkaConfigEntryComparator comp = new KafkaConfigEntryComparator();
    	
    	list.sort(comp);
    }	
    
    
    public static List<KafkaTopicReplicaSummaryInfo> getBrokerLogDirInfoList(int brokerId)throws Exception{
    	if(brokerId<0){
    		return null;
    	}
    	Map<String, LogDirInfo> map = getBrokerLogDirInfo(brokerId);
    	
    	return buildTopicReplicaSummaryInfo(map);
    }
    
    public static Map<String, LogDirInfo> getBrokerLogDirInfo(int brokerId)throws Exception{
    	if(brokerId<0){
    		return null;
    	}
    	final  AdminClient adminClientTmp = getClient().adminClient;
    	List<Integer> ids = new ArrayList<>();
		ids.add(brokerId);
		

		Map<Integer, Map<String, LogDirInfo>> logDirInfoMap = adminClientTmp.describeLogDirs(ids).all().get();
		
		if(logDirInfoMap==null){
			return null;
		}
		
		return logDirInfoMap.get(brokerId);
    }
    
    public static List<KafkaTopicReplicaSummaryInfo> buildTopicReplicaSummaryInfo(Map<String, LogDirInfo> map){
    	if(map==null || map.isEmpty()){
    		return null;
    	}
    	Collection<LogDirInfo> logDirInfos = map.values();
    	if(logDirInfos==null || logDirInfos.isEmpty()){
    		return null;
    	}
    	
    	List<KafkaReplicaInfo> replicaInfoList = new ArrayList<>();
    	
    	for(LogDirInfo item:logDirInfos){
    		Map<TopicPartition, ReplicaInfo> replicaInfos = item.replicaInfos;
    		if(replicaInfos==null || replicaInfos.isEmpty()){
    			continue;
    		}
    		Set<Map.Entry<TopicPartition, ReplicaInfo>> kvs = replicaInfos.entrySet();
    		for(Map.Entry<TopicPartition, ReplicaInfo> kv:kvs){
    			TopicPartition k = kv.getKey();
    			ReplicaInfo v = kv.getValue();
    			KafkaReplicaInfo replicaInfo = new KafkaReplicaInfo();
    			replicaInfo.topic = k.topic();
    			replicaInfo.partition = k.partition();
    			replicaInfo.size = v.size;
    			replicaInfo.offsetLag = v.offsetLag;
    			replicaInfo.isFuture = v.isFuture;
    			
    			replicaInfoList.add(replicaInfo);	
    		}
    	
    	}
    	//
    	Collections.sort(replicaInfoList);
    	//
    	Map<String,KafkaTopicReplicaSummaryInfo> datamap = new HashMap<>();
    	for(KafkaReplicaInfo item:replicaInfoList){
    		String topic = item.topic;
    		KafkaTopicReplicaSummaryInfo info = datamap.get(topic);
    		if(info==null){
    			info = new KafkaTopicReplicaSummaryInfo();
    			info.topic = topic;
    			datamap.put(topic, info);
    		}
    		info.replicas.add(item);
    		info.totalSize = info.totalSize + item.size;
    	}

    	
    	
    	List<KafkaTopicReplicaSummaryInfo> list = new ArrayList<>();
    	list.addAll(datamap.values());
    	
    	return list;
    }
    
	
    public static  PartitionOffsetInfo getPartitionOffsetInfo(String servers,Collection<TopicPartition> partitions){
		if(StringUtils.isBlank(servers)){
			throw new RuntimeException("servers blank");
		}
    	if(partitions==null || partitions.isEmpty()){
			// return null;
			throw new RuntimeException("TopicPartitions empty");
		}
        Properties props = new Properties();
		
		String groupId = genShowOffsetGroupId();

		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
		props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
		// props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

		KafkaConsumer<String, String> consumer = null;
		
		try{
		consumer = new KafkaConsumer<String, String>(props);
		consumer.assign(partitions);

		
		Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
		Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
		
		PartitionOffsetInfo info = new PartitionOffsetInfo();
		info.beginningOffsets = beginningOffsets;
		info.endOffsets = endOffsets;
		
		return info;
		
		}finally{
			close(consumer);
		}
	}
    
    
    public static List<KafkaOffsetInfo> getOffsetInfos(String groupId)throws Exception{  	
    	
    	if(StringUtils.isBlank(groupId)){
			throw new RuntimeException("groupId blank");
		}
    	final KafkaClient kafkaClient = getClient();
    	final AdminClient adminClientTmp = kafkaClient.adminClient;
    	final String server = kafkaClient.server;
    	
        ListConsumerGroupOffsetsResult listConsumerGroupOffsetsResult = adminClientTmp.listConsumerGroupOffsets(groupId);
		
		Map<TopicPartition, OffsetAndMetadata> partitionsToOffsetAndMetadata = listConsumerGroupOffsetsResult.partitionsToOffsetAndMetadata().get();
        
		if(partitionsToOffsetAndMetadata==null || partitionsToOffsetAndMetadata.isEmpty()){
			return null;
		}
    	Set<TopicPartition> topicPartitions = partitionsToOffsetAndMetadata.keySet();
    	
    	PartitionOffsetInfo partitionOffsetMap =  getPartitionOffsetInfo(server,topicPartitions);
    	Map<TopicPartition, Long> beginningOffsets = partitionOffsetMap.beginningOffsets;
    	Map<TopicPartition, Long> endOffsets = partitionOffsetMap.endOffsets;
    	
    	List<TopicPartition> topicPartitionList = new ArrayList<TopicPartition>(topicPartitions);
    	topicPartitionList.sort(new TopicPartitionComparator());
    	
    	List<KafkaOffsetInfo> list = new ArrayList<>();
    	
    	for(TopicPartition item:topicPartitionList){
    		KafkaOffsetInfo info = new KafkaOffsetInfo();
    		info.topic = item.topic();
    		info.partition = item.partition();
    		OffsetAndMetadata md = partitionsToOffsetAndMetadata.get(item);
    		if(md!=null){
    			info.offset = md.offset();
    			info.metadata = md.metadata();
    		}
    		
    		if(beginningOffsets!=null){
    			Long tmp = beginningOffsets.get(item);
    			info.beginning = tmp;
    		}
    		
    		if(endOffsets!=null){
    			Long tmp = endOffsets.get(item);
    			info.end = tmp;
    		}
    		list.add(info);
    	}
    	
    	
    	return list;
    }
    
    
    private static KafkaBroker buildBroker(Node node){
    	KafkaBroker broker = new KafkaBroker();
    	broker.id = node.id();
    	broker.host = node.host();
    	broker.port = node.port();
    	broker.rack = node.rack();
    	
    	return broker;
    }
    
    public static KafkaClusterInfo getClusterInfo()throws Exception{
        final KafkaClient kafkaClientTmp = getClient();

        final  AdminClient adminClientTmp = kafkaClientTmp.adminClient;
        

    	DescribeClusterResult describeClusterResult = adminClientTmp.describeCluster();	
    	
    	Node controller = describeClusterResult.controller().get();
    	Collection<Node> nodes = describeClusterResult.nodes().get();
    	
    	List<Node> nonControllerNodes = new ArrayList<>();
    	for(Node item:nodes){
    		if(controller!=null && controller.id()==item.id()){
    			continue;
    		}
    		nonControllerNodes.add(item);
    	}
    	
    	// controller 放前面
    	List<KafkaBroker> brokers = new ArrayList<>();
    	
    	KafkaBroker controllerBroker = buildBroker(controller);
    	controllerBroker.controller = true;
    	brokers.add(controllerBroker);
    	
    	for(Node item:nonControllerNodes){
    		brokers.add(buildBroker(item));
    	}

    	
    	KafkaClusterInfo info = new KafkaClusterInfo();
    	info.clusterId = describeClusterResult.clusterId().get();
    	info.controller = controller;
    	info.server = kafkaClientTmp.server;
    	
    	
    	info.brokers = brokers;

    	
    	return info;
    }
    
    public static Map<String, TopicListing> listTopics(boolean listInternal)throws Exception{
        final  AdminClient adminClientTmp = getClient().adminClient;
    	ListTopicsOptions listTopicsOptions = new ListTopicsOptions();
        listTopicsOptions.listInternal(listInternal);
       
        ListTopicsResult listTopicsResult = adminClientTmp.listTopics(listTopicsOptions); 
        KafkaFuture<Map<String, TopicListing>> topicListingFuture = listTopicsResult.namesToListings();
        Map<String, TopicListing> topicListingMap = topicListingFuture.get();
        return topicListingMap;
    }
    
    public static ConsumerGroupsInfo listConsumerGroups()throws Exception{
        final  AdminClient adminClientTmp = getClient().adminClient;

        ListConsumerGroupsOptions listConsumerGroupsOptions = new ListConsumerGroupsOptions();
		
		ListConsumerGroupsResult listConsumerGroupsResult = adminClientTmp.listConsumerGroups(listConsumerGroupsOptions);
		
		Collection<ConsumerGroupListing> allConsumerGroupListing = listConsumerGroupsResult.all().get();
        Collection<ConsumerGroupListing> validConsumerGroupListing = listConsumerGroupsResult.valid().get();
        
        ConsumerGroupsInfo info = new ConsumerGroupsInfo();
        info.allConsumerGroupListing = allConsumerGroupListing;
        info.validConsumerGroupListing = validConsumerGroupListing;
        
        
        if(allConsumerGroupListing==null || allConsumerGroupListing.isEmpty()){
        	return info;
        }
        
        Set<String> validGroupIds = new HashSet<>();
        if(validConsumerGroupListing!=null){
        	String groupId = null;
        	for(ConsumerGroupListing item:validConsumerGroupListing){
        		groupId = item.groupId();
        		if(StringUtils.isBlank(groupId)){
        			continue;
        		}
        		validGroupIds.add(groupId);
        	}
        }
        
        List<ConsumerGroupDTO> consumerGroupDTOList = new ArrayList<>();
        ConsumerGroupDTO dto = null;
        for(ConsumerGroupListing item:allConsumerGroupListing){
    		String groupId = item.groupId();
    		if(StringUtils.isBlank(groupId)){
    			continue;
    		}
    		dto = new ConsumerGroupDTO();
    		dto.groupId = groupId;
    		dto.simpleConsumerGroup = item.isSimpleConsumerGroup();
    		if(validGroupIds.contains(groupId)){
    			dto.valid = true;
    		}else{
    			dto.valid = false;
    		}
    		consumerGroupDTOList.add(dto);
    	}   
        
        info.consumerGroupDTOList = consumerGroupDTOList;
    	
    	return info;
    }
    
    
    public static ConsumerGroupDescription getConsumerGroupDescription(String groupId)throws Exception{
    	if(StringUtils.isBlank(groupId)){
			// throw new RuntimeException("groupId blank");
    		return null;
		}
    	List<String> groupIds = new ArrayList<>();
        groupIds.add(groupId);
        
        final  AdminClient adminClientTmp = getClient().adminClient;

        DescribeConsumerGroupsResult describeConsumerGroupsResult = adminClientTmp.describeConsumerGroups(groupIds);
        
        Map<String, ConsumerGroupDescription>  consumerGroupDescriptionMap = describeConsumerGroupsResult.all().get();
        if(consumerGroupDescriptionMap==null){
        	return null;
        }
    	
    	return consumerGroupDescriptionMap.get(groupId);
    }
    
    
    public static String toNodeString(List<Node> list,String sep){
    	if(list==null || list.isEmpty()){
    		return "";
    	}
    	if(sep==null){
    		sep = ";";
    	}
    	return StringUtils.join(list.iterator(), sep);
    }
    
    public static String toTopicPartiotionString(Set<TopicPartition> tps,String sep){
    	if(tps==null || tps.isEmpty()){
    		return "";
    	}
    	if(sep==null){
    		sep = ";";
    	}
    	List<TopicPartition> list = new ArrayList<>(tps);
    	list.sort(new TopicPartitionComparator());
    	
    	return StringUtils.join(list.iterator(), sep);
    }
	

    /*
	
	@Deprecated
	public static ByteBuffer send(String host, int port, AbstractRequest request, ApiKeys apiKey) throws IOException {
		Socket socket = connect(host, port);
		try {
			return send(request, apiKey, socket);
		} finally {
			socket.close();
		}
	}

	private static byte[] issueRequestAndWaitForResponse(Socket socket, byte[] request) throws IOException {
		sendRequest(socket, request);
		return getResponse(socket);
	}

	private static void sendRequest(Socket socket, byte[] request) throws IOException {
		DataOutputStream dos = new DataOutputStream(socket.getOutputStream());
		dos.writeInt(request.length);
		dos.write(request);
		dos.flush();
	}

	private static byte[] getResponse(Socket socket) throws IOException {
		DataInputStream dis = null;
		try {
			dis = new DataInputStream(socket.getInputStream());
			byte[] response = new byte[dis.readInt()];
			dis.readFully(response);
			return response;
		} finally {
			if (dis != null) {
				dis.close();
			}
		}
	}

	private static Socket connect(String hostName, int port) throws IOException {
		return new Socket(hostName, port);
	}

	private static ByteBuffer send(AbstractRequest request, ApiKeys apiKey, Socket socket) throws IOException {
		RequestHeader header = new RequestHeader(apiKey, request.version(), "client-id", 0);

		ByteBuffer buffer = request.serialize(header);


		byte[] serializedRequest = buffer.array();
		byte[] response = issueRequestAndWaitForResponse(socket, serializedRequest);
		ByteBuffer responseBuffer = ByteBuffer.wrap(response);
		ResponseHeader.parse(responseBuffer);
		return responseBuffer;
	}

*/

	public static void main(String[] args) throws Exception {
		
		System.out.println(ApiKeys.LIST_GROUPS.oldestVersion()+","+ApiKeys.LIST_GROUPS.latestVersion());

		short version = (short)1;
		// version = ApiKeys.LIST_GROUPS.latestVersion();
		
		/*
		ListGroupsRequest listGroupsRequest = new ListGroupsRequest.Builder().build(version);
		
		ByteBuffer response = send("localhost", 9092, listGroupsRequest, ApiKeys.LIST_GROUPS);
		
		ListGroupsResponse listGroupsResponse = ListGroupsResponse.parse(response, version);
		
		System.out.println(listGroupsResponse);
		
		List<Group> groups = listGroupsResponse.groups();
		for(Group item:groups){
			System.out.println(item.groupId()+","+item.protocolType());
		}
		///
		List<String> groupIds = new ArrayList<>();
		groupIds.add("kafkademo_test_group");
		DescribeGroupsRequest describeGroupsRequest = new DescribeGroupsRequest.Builder(groupIds).build(version);
		response = send("localhost", 9092, describeGroupsRequest, ApiKeys.DESCRIBE_GROUPS);
		
		DescribeGroupsResponse describeGroupsResponse = DescribeGroupsResponse.parse(response, version);
		
		System.out.println(describeGroupsResponse.groups());

		System.out.println(JSON.toJSONString(describeGroupsResponse));

		
		// ListOffsetRequest listOffsetRequest = new ListOffsetRequest()
		
		// Exception in thread "main" org.apache.kafka.common.errors.UnsupportedVersionException: The broker only supports OffsetFetchRequest v1, but we need v2 or newer to request all topic partitions.

		String groupId = "kafkademo_test_group";
		OffsetFetchRequest offsetFetchRequest = OffsetFetchRequest.Builder.allTopicPartitions(groupId).build(version);
		response = send("localhost", 9092, offsetFetchRequest, ApiKeys.OFFSET_FETCH);
		
		OffsetFetchResponse offsetFetchResponse = OffsetFetchResponse.parse(response, version);
		System.out.println(offsetFetchResponse);

		Map<TopicPartition, PartitionData> responseData = offsetFetchResponse.responseData();
		
		System.out.println(responseData);
		
		Set<TopicPartition> keys = responseData.keySet();
		for(TopicPartition item:keys){
			PartitionData pd = responseData.get(item);
			System.out.println(item.topic()+"-"+item.partition()+"="+pd.offset+","+pd.metadata);

		}
		
		*/

	}

}
