package com.flowcloud.kafka.config;

import cn.hutool.core.collection.CollectionUtil;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.core.env.Environment;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.util.StringUtils;

import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.UUID;

/**
 * Kafka configuration: builds the JSON-serializing producer template, several
 * consumer/listener container factory variants, and registers configured
 * topics as {@code NewTopic} beans.
 *
 * @program: uip
 * @author: wuwenbin
 * @create: 2020-12-19 15:11
 */
@Configuration
@EnableConfigurationProperties(TopicConfigurations.class)
@EnableKafka
public class UipKafKaConfigure {

	/** Offset-reset policy: consume from the earliest available record. */
	private static final String OFFSET_RESET_EARLIEST = "earliest";
	/** Offset-reset policy: consume only records produced after the group joins. */
	private static final String OFFSET_RESET_LATEST = "latest";

	@Resource
	private KafkaProperties kafkaProperties;
	@Autowired
	private TopicConfigurations topicConfigurations;
	private final GenericApplicationContext context;

	private Environment environment = null;

	/**
	 * Listener container concurrency (consumer threads); default listens on 2 partitions.
	 */
	private Integer concurrency = 2;
	/**
	 * Timeout in milliseconds for polling a topic (Kafka default 5000).
	 */
	private Integer pollTimeout = 5000;
	/**
	 * Maximum records fetched per poll for batch consumption.
	 */
	private Integer maxPollRecords = 100;
	/**
	 * Auto-commit interval in milliseconds.
	 * NOTE(review): currently only read from configuration and stored; no factory
	 * below consumes it — confirm whether it should be wired into the builders.
	 */
	private Integer autoCommitIntervalMs = 1000;

	public UipKafKaConfigure(GenericApplicationContext context) throws BeansException {
		this.context = context;
		this.environment = context.getBean(Environment.class);
	}

	/**
	 * Reads listener/consumer overrides from the environment and registers the
	 * configured topics as beans.
	 * <p>
	 * Uses {@code getProperty(key, Integer.class, default)} so each property is
	 * resolved exactly once with type conversion, falling back to the field's
	 * default when absent (the original resolved every key twice and parsed by hand).
	 */
	@PostConstruct
	public void getKafKaConfig() {
		initializeBeans(topicConfigurations.getTopics());
		this.concurrency = environment.getProperty(
				"spring.kafka.listener.concurrency", Integer.class, concurrency);
		this.pollTimeout = environment.getProperty(
				"spring.kafka.listener.poll-timeout", Integer.class, pollTimeout);
		this.maxPollRecords = environment.getProperty(
				"spring.kafka.consumer.max-poll-records", Integer.class, maxPollRecords);
		this.autoCommitIntervalMs = environment.getProperty(
				"spring.kafka.consumer.auto-commit-interval", Integer.class, autoCommitIntervalMs);
	}

	/**
	 * Producer template with String keys and JSON-serialized values, built on top
	 * of the Spring Boot {@code spring.kafka.producer.*} properties.
	 *
	 * @return a {@link KafkaTemplate} for publishing arbitrary payloads as JSON
	 */
	@Bean
	public KafkaTemplate<String, Object> kafkaTemplate() {
		Map<String, Object> producerProperties = kafkaProperties.buildProducerProperties();
		producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
		// Diamond operator instead of the raw DefaultKafkaProducerFactory type
		// (the raw type defeated generic checking and produced an unchecked warning).
		return new KafkaTemplate<>(new DefaultKafkaProducerFactory<>(producerProperties));
	}

	/**
	 * Default consumer factory: service name as group id, JSON deserialization,
	 * earliest offset reset.
	 *
	 * @return the shared {@link ConsumerFactory}
	 */
	@Bean
	public ConsumerFactory<String, Object> kafkaConsumerFactory() {
		return ListenerContainerFactoryBuilder.<String, Object>of(kafkaProperties)
				.init()
				.withConsumerFactoryCreator(DefaultKafkaConsumerFactory::new)
				.withConsumerOffsetReset(OFFSET_RESET_EARLIEST)
				.withConsumerGroupId(environment.getProperty("spring.application.name"))
				.buildConsumerFactory();
	}

	/**
	 * General-purpose batch listener factory: service name as group id, JSON
	 * deserialization, earliest offset reset, batch consumption.
	 * <p>
	 * Setting auto-commit to {@code false} alone does not disable committing:
	 * offsets are still committed per BATCH by the container.
	 *
	 * @return batch listener container factory
	 */
	@Bean
	public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, Object>> commonListenerContainerFactory() {
		return ListenerContainerFactoryBuilder.<String, Object>of(kafkaProperties)
				.init()
				.withConsumerFactoryCreator(DefaultKafkaConsumerFactory::new)
				.withConsumerOffsetReset(OFFSET_RESET_EARLIEST)
				.withConsumerGroupId(environment.getProperty("spring.application.name"))
				.withConsumerAutoCommit(false)
				.withConsumerMaxPollRecords(maxPollRecords)
				.withListenerBatchListener(true)
				.withListenerConcurrency(concurrency)
				.withListenerPollTimeout(pollTimeout)
				.build();
	}

	/**
	 * Single-threaded manual-ack factory: service name as group id, earliest
	 * offset reset, MANUAL acknowledgment mode.
	 *
	 * @return factory
	 */
	@Bean
	public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, Object>> serverNameGroupOneConcurrencyManualEarliestListenerFactory() {
		return ListenerContainerFactoryBuilder.<String, Object>of(kafkaProperties)
				.init()
				.withConsumerFactoryCreator(DefaultKafkaConsumerFactory::new)
				.withConsumerOffsetReset(OFFSET_RESET_EARLIEST)
				.withConsumerGroupId(environment.getProperty("spring.application.name"))
				.withListenerConcurrency(1)
				.withListenerPollTimeout(pollTimeout)
				.withListenerAckMode(ContainerProperties.AckMode.MANUAL)
				.build();
	}

	/**
	 * Single-threaded manual-ack factory using {@code ip:port} as the group id,
	 * so every instance consumes independently (broadcast-style).
	 *
	 * @return factory
	 */
	@Bean
	public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, Object>> ipGroupOneConcurrencyManualListenerFactory() {
		return ListenerContainerFactoryBuilder.<String, Object>of(kafkaProperties)
				.init()
				.withConsumerFactoryCreator(DefaultKafkaConsumerFactory::new)
				.withConsumerOffsetReset(OFFSET_RESET_EARLIEST)
				.withConsumerGroupId(getIPAddress() + ":" + getProjectPort())
				.withListenerAckMode(ContainerProperties.AckMode.MANUAL)
				.withListenerConcurrency(1)
				.withListenerPollTimeout(pollTimeout)
				.build();
	}

	/**
	 * Batch manual-ack factory using {@code ip:port} as the group id, concurrent
	 * consumption, latest offset reset (only new records are consumed).
	 *
	 * @return factory
	 */
	@Bean
	public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, Object>> ipGroupLatestManualBatchListenerFactory() {
		return ListenerContainerFactoryBuilder.<String, Object>of(kafkaProperties)
				.init()
				.withConsumerFactoryCreator(DefaultKafkaConsumerFactory::new)
				.withConsumerOffsetReset(OFFSET_RESET_LATEST)
				.withConsumerGroupId(getIPAddress() + ":" + getProjectPort())
				.withConsumerMaxPollRecords(maxPollRecords)
				.withListenerAckMode(ContainerProperties.AckMode.MANUAL)
				.withListenerBatchListener(true)
				.withListenerConcurrency(concurrency)
				.withListenerPollTimeout(pollTimeout)
				.build();
	}

	/**
	 * Resolves the local host's IP address, falling back to a random UUID string
	 * when resolution fails or yields an empty address, so the group id built
	 * from it is still unique.
	 *
	 * @return the local IP address, or a dash-free random UUID on failure
	 */
	private static String getIPAddress() {
		try {
			InetAddress address = InetAddress.getLocalHost();
			// hasLength replaces the deprecated StringUtils.isEmpty negation.
			if (address != null && StringUtils.hasLength(address.getHostAddress())) {
				return address.getHostAddress();
			}
		} catch (UnknownHostException ignored) {
			// Host name not resolvable — fall through to the single UUID fallback
			// below (the original duplicated this fallback in the catch block).
		}
		return UUID.randomUUID().toString().replace("-", "");
	}

	/**
	 * Registers each configured topic as a {@code NewTopic} bean so the Kafka
	 * admin client can auto-create it; no-op when no topics are configured.
	 *
	 * @param topics topics declared under the {@code TopicConfigurations} properties
	 */
	private void initializeBeans(List<TopicConfigurations.Topic> topics) {
		if (CollectionUtil.isEmpty(topics)) {
			return;
		}
		topics.forEach(t -> context.registerBean(t.name, NewTopic.class, t::toNewTopic));
	}

	/**
	 * Reads the port this application is deployed on.
	 *
	 * @return the {@code server.port} property value (may be null if unset)
	 */
	private String getProjectPort() {
		return environment.getProperty("server.port");
	}

}
