package com.kafka.provider.config;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.TopicBuilder;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.requestreply.ReplyingKafkaTemplate;

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Pattern;


/**
 * Kafka provider (producer-side) configuration: admin client, topic declarations,
 * producer factories, and the various KafkaTemplate flavors.
 *
 * @author Lin Jiang
 * @since 2024/3/27
 */
@Configuration
public class KafkaPoviderConfig {
    // NOTE(review): class name has a typo ("Povider" -> "Provider"). Kept as-is because
    // renaming a @Configuration class changes its bean name and the file name; fix in a
    // dedicated refactor.

    /** Kafka broker address, shared by the admin client and the producer factory (was duplicated inline). */
    private static final String BOOTSTRAP_SERVERS = "192.168.32.128:9092";

    /**
     * Admin client Spring uses to auto-create the {@code NewTopic} beans declared below.
     *
     * @return a KafkaAdmin configured to abort application startup if the broker is unreachable
     */
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> configs = new HashMap<>();
        // Broker connection for the admin client.
        configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        KafkaAdmin admin = new KafkaAdmin(configs);
        // Fail fast on startup instead of silently running without a broker.
        admin.setFatalIfBrokerNotAvailable(true);
        return admin;
    }


    /*--------------------------------- topic creation -------------------------------------*/

    /** Declares topic "one" (1 partition, 1 replica); created by KafkaAdmin on startup. */
    @Bean
    public NewTopic topic() {
        return TopicBuilder.name("one").partitions(1).replicas(1).build();
    }

    /**
     * Declares several topics in a single bean.
     *
     * <p>NOTE(review): "tow" looks like a typo for "two", but it is a live topic name that
     * the routing pattern in {@link #routingTemplate} matches, so it is left unchanged.
     */
    @Bean
    public KafkaAdmin.NewTopics topics() {
        return new KafkaAdmin.NewTopics(
                TopicBuilder.name("tow").partitions(1)
                        .replicas(1).build(),
                TopicBuilder.name("domain").partitions(1)
                        .replicas(1).build()
        );
    }

    /*--------------------------------- producer factory -----------------------------------*/

    /**
     * Default String/String producer factory.
     *
     * <p>Intentionally declared with the raw type: this bean is autowired both as
     * {@code ProducerFactory<Object, Object>} (kafkaTemplate, routingTemplate) and as
     * {@code ProducerFactory<String, String>} (replyingTemplate). Parameterizing it would
     * make Spring's generics-aware autowiring reject one of those injection points.
     *
     * @return factory that creates one producer per thread
     */
    @Bean
    @SuppressWarnings({"rawtypes", "unchecked"})
    public DefaultKafkaProducerFactory defaultKafkaProducerFactory() {
        DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory(productConfigs());
        // One producer per thread so concurrent flush() calls do not interfere with each other.
        factory.setProducerPerThread(true);
        return factory;
    }

    /**
     * Base producer configuration: broker address and String serializers for key and value.
     *
     * @return mutable config map (callers such as {@link #routingTemplate} copy and override it)
     */
    public Map<String, Object> productConfigs() {
        Map<String, Object> map = new HashMap<>();
        // Broker the producers connect to.
        map.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        // Serialize keys as Strings.
        map.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Serialize values as Strings.
        map.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return map;
    }

    /**
     * General-purpose template.
     *
     * @param pf the producer factory bean (the {@link #defaultKafkaProducerFactory()} instance,
     *           injected by Spring) — previously ignored in favor of a direct method call
     * @return template backed by the injected factory
     */
    @Bean
    public KafkaTemplate<Object, Object> kafkaTemplate(ProducerFactory<Object, Object> pf) {
        return new KafkaTemplate<>(pf);
    }


    /*------------------------------ topic-routing template --------------------------------*/

    /**
     * Template that picks a producer factory by matching the destination topic name
     * against regex patterns, in insertion order. Not currently registered (@Bean commented
     * out in the original); kept for future use.
     *
     * @param context application context used to register the byte-array factory as a bean
     * @param pf      the default String-serializing producer factory
     * @return routing template: topic "tow" uses byte[] values, everything else uses Strings
     */
    public RoutingKafkaTemplate routingTemplate(GenericApplicationContext context,
                                                ProducerFactory<Object, Object> pf) {
        Map<String, Object> configs = new HashMap<>(pf.getConfigurationProperties());
        configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

        // Register the byte-array factory with the Spring context so it is container-managed.
        DefaultKafkaProducerFactory<Object, Object> bytesPF = new DefaultKafkaProducerFactory<>(configs);
        context.registerBean("bytesPF", DefaultKafkaProducerFactory.class, () -> bytesPF);

        // LinkedHashMap: patterns are tried in insertion order, so the specific one goes first.
        Map<Pattern, ProducerFactory<Object, Object>> map = new LinkedHashMap<>();
        map.put(Pattern.compile("tow"), bytesPF);
        // Catch-all falls back to the default String-serializing factory.
        map.put(Pattern.compile(".+"), pf);
        return new RoutingKafkaTemplate(map);
    }

    /*---------------------- ReplyingKafkaTemplate: request/reply sending -------------------*/

    /**
     * Request/reply template: sends a record and waits for the correlated reply on the
     * reply container's topic.
     *
     * @param pf               producer factory for outbound requests
     * @param repliesContainer listener container consuming the reply topic
     * @return template that surfaces server-side errors via the "serverSentAnError" header
     */
    @Bean
    public ReplyingKafkaTemplate<String, String, String> replyingTemplate(
            ProducerFactory<String, String> pf,
            ConcurrentMessageListenerContainer<String, String> repliesContainer) {
        ReplyingKafkaTemplate<String, String, String> replyingKafkaTemplate = new ReplyingKafkaTemplate<>(pf, repliesContainer);
        // Inspect each reply for an error marker set by the server side.
        replyingKafkaTemplate.setReplyErrorChecker(consumerRecord -> {
            // "serverSentAnError" header marks a failed request; its presence alone signals the error.
            Header error = consumerRecord.headers().lastHeader("serverSentAnError");
            if (error != null) {
                return new RuntimeException("有错误");
            } else {
                return null;
            }
        });
        return replyingKafkaTemplate;
    }

    /**
     * Listener container for the "reply" topic used by {@link #replyingTemplate}.
     *
     * @param containerFactory shared listener container factory
     * @return container with auto-startup disabled; the replying template starts it itself
     */
    @Bean
    public ConcurrentMessageListenerContainer<String, String> repliesContainer(
            ConcurrentKafkaListenerContainerFactory<String, String> containerFactory) {
        ConcurrentMessageListenerContainer<String, String> repliesContainer =
                containerFactory.createContainer("reply");
        repliesContainer.getContainerProperties().setGroupId("replyGroup");
        // Started by ReplyingKafkaTemplate, not by the application context.
        repliesContainer.setAutoStartup(false);
        return repliesContainer;
    }

}
