package com.xiangxiao.rpan.data.config;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.*;

import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;

/**
 * @Projectname: rpan-data
 * @Author: xiangxiao
 * @Email: 573768011@qq.com
 * @Date: 2024/5/31 1:13
 */
@EnableKafka
@Configuration
public class KafkaConfig {
  /** Consumer group id. Previously injected but never applied — see {@link #consumerConfigs()}. */
  @Value("${spring.cloud.stream.bindings.rpan-data.group:rpan-data-new}")
  private String groupId;
  /** Upper bound on bytes returned per fetch request (fetch.max.bytes). */
  @Value("${kafka.consumer.fetch.max.bytes:62914560}")
  private int fetchSize;
  /** Offset reset policy when no committed offset exists (earliest/latest). */
  @Value("${kafka.consumer.auto.offset.reset:latest}")
  private String autoOffsetReset;
  /** Maximum records returned by a single poll() call. */
  @Value("${kafka.consumer.maxPollRecordsConfig:850}")
  private int maxPollRecordsConfig;
  /** Listener container concurrency (number of consumer threads per container). */
  @Value("${kafka.consumers.number:2}")
  private int consumers;
  /** Container poll timeout in milliseconds. */
  @Value("${kafka.consumer.poll.timeout:3000}")
  private long pollTimeout;

  /** Broker waits until at least this many bytes are available before answering a fetch. */
  @Value("${kafka.consumer.fetch.min.bytes:8388608}")
  private int fetchMinBytes;
  /** Max time the broker blocks a fetch waiting for fetch.min.bytes to accumulate. */
  @Value("${kafka.consumer.fetch.max.wait.ms:6500}")
  private int fetchMaxWaitMS;

  /** Worker threads allocated per configured consumer for downstream processing. */
  private static final int THREAD_NUM = 20;
  private static final ThreadFactory THREAD_FACTORY =
    new ThreadFactoryBuilder().setNameFormat("rpan-data topic consumer threadPool-%d").build();

  /**
   * Batch listener container factory with manual-immediate acknowledgement.
   *
   * <p>NOTE(review): the generic key type is {@code Integer}, but
   * {@link #consumerConfigs()} registers {@link StringDeserializer} for keys, so keys
   * are actually delivered as {@code String} at runtime. The declared generics are kept
   * unchanged here to avoid altering the bean signature — confirm intent and align them.
   *
   * @return a concurrent container factory configured for batch consumption
   */
  @Bean("kafkaOneContainerFactory")
  KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>> kafkaOneContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    // One KafkaMessageListenerContainer (and consumer thread) per unit of concurrency.
    factory.setConcurrency(consumers);
    factory.getContainerProperties().setPollTimeout(pollTimeout);
    // Listener receives List<ConsumerRecord> batches rather than single records.
    factory.setBatchListener(true);
    // Offsets are committed immediately when the listener calls Acknowledgment.acknowledge().
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
    return factory;
  }

  /** Builds the consumer factory backing the listener containers. */
  private ConsumerFactory<Integer, String> consumerFactory() {
    return new DefaultKafkaConsumerFactory<>(consumerConfigs());
  }

  /**
   * Raw Kafka consumer properties.
   *
   * <p>Auto-commit is disabled because acknowledgement is manual (MANUAL_IMMEDIATE above).
   *
   * @return the consumer configuration map
   */
  private Map<String, Object> consumerConfigs() {
    Map<String, Object> props = new HashMap<>(16);
    // Bug fix: groupId was injected but never applied, so the configured group was ignored.
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
    props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, fetchSize);
    props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, fetchSize);
    props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, fetchMinBytes);
    props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, fetchMaxWaitMS);
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecordsConfig);
    return props;
  }

  /**
   * Fixed-size worker pool for processing consumed records.
   *
   * <p>Core == max ({@code THREAD_NUM * consumers}) with a bounded queue; when the queue
   * fills, CallerRunsPolicy makes the submitting (consumer) thread run the task itself,
   * which applies natural backpressure on polling instead of dropping work.
   *
   * @return the shared consumer-side executor
   */
  @Bean("rpanDataConsumerExecutors")
  ExecutorService rpanDataConsumerExecutors() {
    return new ThreadPoolExecutor(THREAD_NUM * consumers, THREAD_NUM * consumers,
      0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(THREAD_NUM * consumers * 50),
      THREAD_FACTORY, new ThreadPoolExecutor.CallerRunsPolicy());
  }
}
