package com.kafka.study.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
public class CConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.key-deserializer}")
    private String keyDeserializer;

    @Value("${spring.kafka.consumer.value-deserializer}")
    private String valueDeserializer;

    @Value("${spring.kafka.consumer.time.group-id}")
    private String timeGroupId;

    @Value("${spring.kafka.consumer.time.auto-offset-reset}")
    private String timeAutoOffsetReset;

    @Value("${spring.kafka.consumer.time.enable-auto-commit}")
    private boolean timeEnableAutoCommit;

    @Value("${spring.kafka.consumer.time.max-poll-records}")
    private int timeMaxPollRecords;

    @Value("${spring.kafka.consumer.time.fetch-max-wait}")
    private int timeFetchMaxWait;

    @Value("${spring.kafka.consumer.time.fetch-min-bytes}")
    private int timeFetchMinBytes;

    @Value("${spring.kafka.consumer.time.fetch-max-bytes}")
    private int timeFetchMaxBytes;

    @Value("${spring.kafka.consumer.business.group-id}")
    private String businessGroupId;

    @Value("${spring.kafka.consumer.business.auto-offset-reset}")
    private String businessAutoOffsetReset;

    @Value("${spring.kafka.consumer.business.enable-auto-commit}")
    private boolean businessEnableAutoCommit;

    @Value("${spring.kafka.consumer.business.max-poll-records}")
    private int businessMaxPollRecords;

    @Value("${spring.kafka.consumer.business.fetch-max-wait}")
    private int businessFetchMaxWait;

    @Value("${spring.kafka.consumer.business.fetch-min-bytes}")
    private int businessFetchMinBytes;

    @Value("${spring.kafka.consumer.business.fetch-max-bytes}")
    private int businessFetchMaxBytes;

    @Value("${spring.kafka.consumer.dead.group-id}")
    private String deadGroupId;

    @Value("${spring.kafka.consumer.dead.auto-offset-reset}")
    private String deadAutoOffsetReset;

    @Value("${spring.kafka.consumer.dead.enable-auto-commit}")
    private boolean deadEnableAutoCommit;

    @Value("${spring.kafka.consumer.dead.max-poll-records}")
    private int deadMaxPollRecords;

    @Value("${spring.kafka.consumer.dead.fetch-max-wait}")
    private int deadFetchMaxWait;

    @Value("${spring.kafka.consumer.dead.fetch-min-bytes}")
    private int deadFetchMinBytes;

    @Value("${spring.kafka.consumer.dead.fetch-max-bytes}")
    private int deadFetchMaxBytes;

    /**
     * Builds the consumer property map shared by all three consumer factories.
     * Connection and (de)serializer settings come from the common
     * {@code spring.kafka.*} properties; the remaining values are supplied
     * per consumer group by the caller.
     *
     * @param groupId          consumer group id
     * @param autoOffsetReset  offset reset policy when no committed offset exists
     * @param enableAutoCommit whether the broker auto-commits offsets
     *                         (false means offsets are committed manually by listeners)
     * @param maxPollRecords   maximum number of records returned by one poll
     * @param fetchMaxWaitMs   maximum time (ms) the broker waits before answering a fetch
     * @param fetchMinBytes    minimum bytes the broker accumulates before answering a fetch
     * @param fetchMaxBytes    maximum bytes returned for a single fetch request
     * @return mutable map of Kafka consumer configuration properties
     */
    private Map<String, Object> consumerProps(String groupId,
                                              String autoOffsetReset,
                                              boolean enableAutoCommit,
                                              int maxPollRecords,
                                              int fetchMaxWaitMs,
                                              int fetchMinBytes,
                                              int fetchMaxBytes) {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        configProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        configProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        configProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        configProps.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        configProps.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, fetchMaxWaitMs);
        configProps.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, fetchMinBytes);
        configProps.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, fetchMaxBytes);
        return configProps;
    }

    /**
     * Builds a listener container factory around the given consumer factory:
     * concurrency of 2x available processors, record-at-a-time (non-batch) listeners.
     */
    private ConcurrentKafkaListenerContainerFactory<String, String> containerFactory(
            ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        // Consumer concurrency; effective parallelism is still capped by partition count.
        factory.setConcurrency(Runtime.getRuntime().availableProcessors() * 2);
        factory.setBatchListener(false); // deliver records one at a time, not in batches
        return factory;
    }

    /**
     * Consumer factory for the "time" consumer group.
     *
     * NOTE(review): unlike the business/dead factories, no listener container
     * factory bean is declared for this one — presumably it is consumed
     * directly elsewhere; confirm it is not an omission.
     */
    @Bean("timeConsumerFactory")
    public ConsumerFactory<String, String> timeConsumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerProps(
                timeGroupId, timeAutoOffsetReset, timeEnableAutoCommit,
                timeMaxPollRecords, timeFetchMaxWait, timeFetchMinBytes, timeFetchMaxBytes));
    }

    /** Consumer factory for the "business" consumer group. */
    @Bean("businessConsumerFactory")
    public ConsumerFactory<String, String> businessConsumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerProps(
                businessGroupId, businessAutoOffsetReset, businessEnableAutoCommit,
                businessMaxPollRecords, businessFetchMaxWait, businessFetchMinBytes, businessFetchMaxBytes));
    }

    /** Consumer factory for the "dead" (dead-letter) consumer group. */
    @Bean("deadConsumerFactory")
    public ConsumerFactory<String, String> deadConsumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerProps(
                deadGroupId, deadAutoOffsetReset, deadEnableAutoCommit,
                deadMaxPollRecords, deadFetchMaxWait, deadFetchMinBytes, deadFetchMaxBytes));
    }

    /** Listener container factory for "business" listeners. */
    @Bean("businessKafkaListenerContainer")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> businessKafkaListenerContainer() {
        return containerFactory(businessConsumerFactory());
    }

    /** Listener container factory for "dead" (dead-letter) listeners. */
    @Bean("deadKafkaListenerContainer")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> deadKafkaListenerContainer() {
        return containerFactory(deadConsumerFactory());
    }
}
