package com.demo.config;

import com.demo.constant.KafkaCode;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
public class KafkaConfig {

	//	https://blog.csdn.net/qq_36027670/article/details/79488880
    //  https://segmentfault.com/a/1190000019733221?utm_source=tag-newest
	//	https://blog.csdn.net/qq_19734597/article/details/82250521

	/**
	 * Producer settings exposed as a bean so {@link #producerFactory()} can consume them.
	 *
	 * @return mutable map of Kafka producer configuration entries
	 */
	@Bean
	public Map<String, Object> producerConfigs() {
		Map<String, Object> config = new HashMap<>();
		config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaCode.BOOTSTRAP_SERVERS);
		// No retries on transient send failures.
		config.put(ProducerConfig.RETRIES_CONFIG, 0);
		// Max time send() may block when the buffer is full (broker-client default is 60s).
		// NOTE(review): 6000 ms (6s) looks like it may be missing a zero (60000) — confirm intent.
		config.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 6000);
		// Reusable ProducerBatch size inside the accumulator's BufferPool, in bytes.
		config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
		// A batch is sent once it fills up or once it has lingered this many ms.
		config.put(ProducerConfig.LINGER_MS_CONFIG, 1);
		// Total memory available for buffering unsent records, in bytes.
		config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40960);
		config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		// Logical client id reported to the brokers for logging/metrics.
		config.put(ProducerConfig.CLIENT_ID_CONFIG, "producer.client.id.topinfo");
		return config;
	}

	/**
	 * Producer factory backed by {@link #producerConfigs()}.
	 * Uses the diamond operator instead of a raw type to avoid unchecked warnings.
	 *
	 * @return factory that creates {@code String -> String} Kafka producers
	 */
	@Bean
	public ProducerFactory<String, String> producerFactory() {
		return new DefaultKafkaProducerFactory<>(producerConfigs());
	}

	/**
	 * Consumer settings exposed as a bean so {@link #consumerFactory()} can consume them.
	 * Fixes the raw {@code HashMap} and uses an int for the session timeout so it is
	 * consistent with the other numeric entries (Kafka accepts either form).
	 *
	 * @return mutable map of Kafka consumer configuration entries
	 */
	@Bean
	public Map<String, Object> consumerConfigs() {
		Map<String, Object> props = new HashMap<>();
		/*
		 * auto.offset.reset semantics:
		 * earliest: resume from the committed offset; with no committed offset, read from the beginning
		 * latest:   resume from the committed offset; with no committed offset, read only new records
		 * none:     resume from committed offsets; throw if any partition lacks a committed offset
		 */
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");// where to restart after offset loss/out-of-range

		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaCode.BOOTSTRAP_SERVERS);
		props.put(ConsumerConfig.GROUP_ID_CONFIG, "tmp_id");
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);// commit offsets automatically
		props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100);// auto-commit interval in ms
		props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 15000);// group session timeout in ms
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		return props;
	}

	/**
	 * Listener container factory used by {@code @KafkaListener} methods.
	 * Fixes the raw {@code ConcurrentKafkaListenerContainerFactory} constructor call.
	 *
	 * @return factory producing concurrent {@code String -> String} listener containers
	 */
	@Bean
	public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
		ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
		// Containers are built from the shared consumer factory below.
		factory.setConsumerFactory(consumerFactory());
		// Two consumer threads process partitions concurrently.
		factory.setConcurrency(2);
		// Max time (ms) a poll() blocks waiting for records.
		factory.getContainerProperties().setPollTimeout(3000);
		return factory;
	}

	/**
	 * Consumer factory backed by {@link #consumerConfigs()}.
	 * Uses the diamond operator instead of a raw type to avoid unchecked warnings.
	 *
	 * @return factory that creates {@code String -> String} Kafka consumers
	 */
	@Bean
	public ConsumerFactory<String, String> consumerFactory() {
		return new DefaultKafkaConsumerFactory<>(consumerConfigs());
	}

	/* --------------kafka template configuration-----------------**/
	/**
	 * Template for sending {@code String -> String} messages, built on {@link #producerFactory()}.
	 * Fixes the raw {@code KafkaTemplate} constructor call and drops the redundant local.
	 *
	 * @return the shared Kafka send template
	 */
	@Bean
	public KafkaTemplate<String, String> kafkaTemplate() {
		return new KafkaTemplate<>(producerFactory());
	}

}