package com.jiepuxun.demo.config;

import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.config.TopicBuilder;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class KafkaConfig {

    // NOTE(review): removed a large commented-out block that manually wired a
    // ConcurrentKafkaListenerContainerFactory / ConsumerFactory / KafkaAdmin.
    // It was dead code, duplicated what Spring Boot auto-configures from
    // application properties, and mistakenly used the ProducerConfig constant
    // (with an empty bootstrap-servers value) to build *consumer* configs.

    /**
     * Declares the {@code test} topic at application startup.
     * <p>
     * When {@code kafkaTemplate.send("topic", message)} is executed, Kafka will
     * auto-create the topic on first use, but such an auto-created topic gets
     * only a single partition and no replicas by default. Declaring it as a
     * {@link NewTopic} bean lets us control the partition count (and, if
     * desired, the replication factor) explicitly; Spring's {@code KafkaAdmin}
     * picks this bean up and creates the topic if it does not already exist.
     *
     * @return the topic definition: name {@code test}, 3 partitions,
     *         {@code cleanup.policy=compact}; replication factor is left to the
     *         broker default
     */
    @Bean("test")
    public NewTopic test() {
        return TopicBuilder.name("test") // topic name
                .partitions(3)           // number of partitions
                .compact()               // cleanup.policy=compact (log compaction)
                .build();
    }

}
