package com.tyc.kafka.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.Arrays;
import java.util.Properties;

/**
 * Kafka client configuration: exposes a pre-subscribed string {@link KafkaConsumer}
 * and a string {@link KafkaProducer} as Spring beans, wired from the
 * {@code kafka.group.*} application properties.
 *
 * @author tyc
 * @version 1.0
 * @date 2022-10-19 13:31:12
 */
@Configuration
public class KafkaConfig {

    /** Consumer group id ({@code kafka.group.id}). */
    @Value("${kafka.group.id}")
    private String kafkaGroupId;

    /** Comma-separated list of topics to subscribe to ({@code kafka.group.topics}). */
    @Value("${kafka.group.topics}")
    private String topics;

    /** Kafka bootstrap server list ({@code kafka.group.servers}). */
    @Value("${kafka.group.servers}")
    private String kafkaGroups;

    /**
     * Builds a manually-committed string consumer that is already subscribed to the
     * configured topics.
     *
     * <p>Auto-commit is disabled, so the consuming code is responsible for calling
     * {@code commitSync()}/{@code commitAsync()} after processing.
     *
     * <p>NOTE(review): {@link KafkaConsumer} is NOT thread-safe — this singleton bean
     * must only be polled from a single thread.
     *
     * @return a {@code KafkaConsumer<String, String>} subscribed to {@link #topics}
     */
    @Bean("kafkaConsumer")
    public KafkaConsumer<String, String> initKafkaConsumer() {
        Properties properties = new Properties();
        // Heartbeat interval; kept at one third of session.timeout.ms as recommended.
        properties.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000 * 20);
        // Session timeout: if the broker receives no request (including heartbeats)
        // from this consumer within this window, it marks the consumer dead and
        // triggers a rebalance. Larger values mean slower failure detection.
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000 * 60);
        // Maximum number of records returned by a single poll() (default 500).
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 300);
        // Maximum gap allowed between two poll() calls; exceeding it triggers a
        // rebalance and the un-committed records are redelivered (default 5 minutes).
        properties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 5 * 60 * 1000);
        // Offsets are committed manually by the consuming code, so the auto-commit
        // interval is intentionally not configured here.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // With no committed offset for the group, start from the earliest available
        // offset ("latest" would start from the newest).
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaGroups);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaGroupId);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Arrays.asList(topics.split(",")));
        return consumer;
    }

    /**
     * Builds a string-serializing producer pointed at the configured brokers.
     *
     * @return a thread-safe {@code KafkaProducer<String, String>}
     */
    @Bean("kafkaProducer")
    public KafkaProducer<String, String> initKafkaProducer() {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaGroups);
        properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        return new KafkaProducer<>(properties);
    }
}
