package com.xaicode.ctoroad.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartitionInfo;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Kafka listener component.
 * <p>
 *     Manual acknowledgment mode:
 *     when {@code enable.auto.commit} is {@code false} in the Spring Boot / Kafka
 *     integration, Spring operates in manual-commit mode. Add an
 *     {@code Acknowledgment ack} parameter to receive the commit handle; this must
 *     be used together with
 *     <pre>factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);</pre>
 * </p>
 *
 * @author Locker cjxia@isoftstone.com
 * @version 1.0
 */
@Slf4j
@Component
public class KafkaListeners {

    private final AdminClient adminClient;

    public KafkaListeners(AdminClient adminClient) {
        this.adminClient = adminClient;
    }

    @Value("${spring.kafka.topic.max-partitions}")
    private int kafkaTopicMaxPartitions;

    /**
     * 检查主题和分区，配置指定分区数量
     */
    @PostConstruct
    public void refreshTopics() {

        adminClient.listTopics().names()
                .whenComplete((currentTopics, throwable) -> {
                    log.info("kafkaConfig adminClient completed, current topics are : {}", currentTopics);

                    // 已存在的topic检查分区，将其分区个数设置为配置文件中指定的个数
                    adminClient.describeTopics(currentTopics).all().whenComplete((descriptionMap, throwable1) -> {
                        for (Map.Entry<String, TopicDescription> entry : descriptionMap.entrySet()) {
                            List<TopicPartitionInfo> tps = entry.getValue().partitions();
                            log.info("topic {} with description {}, current partition count is {}", entry.getKey(), tps, tps.size());
                            if (tps.size() < kafkaTopicMaxPartitions) {
                                log.info("Increase the partition count for a topic {} to {}", entry.getKey(), kafkaTopicMaxPartitions);
                                NewPartitions newPartitions = NewPartitions.increaseTo(kafkaTopicMaxPartitions);
                                Map<String, NewPartitions> partitionsMap = new HashMap<>();
                                partitionsMap.put(entry.getKey(), newPartitions);
                                adminClient.createPartitions(partitionsMap);
                            }
                        }
                    });
                });

    }

    @KafkaListener(topicPattern = "test.*")
    public void listen2(ConsumerRecord<?, ?> record, Acknowledgment ack) {

        System.out.println("--- " + new Date(System.currentTimeMillis()) + " ---");
        System.out.printf("test.* = %s, key = %s, offset = %d, value = %s \n", record.topic(), record.topic(), record.offset(), record.value());

        // 手动确认ack
        ack.acknowledge();
    }

}
