package com.hefei.garden.config.kafka.config;

import com.hefei.garden.pojo.KafkaBaseDto;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Objects;
import java.util.Properties;

/**
 * Factory for Kafka client startup configuration.
 *
 * <p>Non-instantiable utility class: obtain {@link Properties} via the static
 * {@code initConsumerConfig} methods.
 *
 * @author devil
 */
public class InitConfig {

    /**
     * Default Kafka bootstrap server address.
     * NOTE(review): hard-coded localhost — consider externalizing to configuration;
     * callers that need a different address can use the two-arg overload.
     */
    public static final String BROKER_LIST = "127.0.0.1:9092";

    private InitConfig() {
        // Utility class — no instances.
    }

    /**
     * Builds consumer {@link Properties} using the default broker list
     * ({@link #BROKER_LIST}).
     *
     * @param kafkaBaseDto carrier of per-consumer settings; its {@code groupId}
     *                     becomes the consumer group id (must not be {@code null})
     * @return a fresh {@link Properties} ready for a {@code KafkaConsumer}
     * @throws NullPointerException if {@code kafkaBaseDto} is {@code null}
     */
    public static Properties initConsumerConfig(KafkaBaseDto kafkaBaseDto) {
        return initConsumerConfig(kafkaBaseDto, BROKER_LIST);
    }

    /**
     * Builds consumer {@link Properties} for the given bootstrap servers.
     *
     * @param kafkaBaseDto     carrier of per-consumer settings; its {@code groupId}
     *                         becomes the consumer group id (must not be {@code null})
     * @param bootstrapServers Kafka bootstrap server list, e.g. {@code "host:9092"}
     * @return a fresh {@link Properties} ready for a {@code KafkaConsumer}
     * @throws NullPointerException if either argument is {@code null}
     */
    public static Properties initConsumerConfig(KafkaBaseDto kafkaBaseDto, String bootstrapServers) {
        Objects.requireNonNull(kafkaBaseDto, "kafkaBaseDto");
        Objects.requireNonNull(bootstrapServers, "bootstrapServers");
        Properties properties = new Properties();
        // Broker address
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Consumer group id, supplied per-consumer by the caller
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaBaseDto.getGroupId());
        // Key/value deserializers — messages are consumed as plain strings
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Max records returned per poll() (broker default is 500)
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
        // Auto-commit offsets every 1000 ms.
        // NOTE(review): auto-commit trades simplicity for delivery guarantees —
        // records polled but not yet processed may be committed; confirm this
        // at-most-once-per-poll behavior is acceptable for consumers using this config.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        // Where to start when the group has NO committed offset for a partition:
        // "latest" skips to the end. (With a committed offset present, consumption
        // resumes from it — this setting is then ignored.)
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return properties;
    }

}
