// NOTE(review): This ENTIRE file is commented out — a disabled Spring Kafka
// configuration class (producer props, consumer props, KafkaTemplate, and three
// batch listener container factories). Commented-out code should normally be
// deleted and recovered from VCS history when needed; it is kept verbatim below
// with the original Chinese comments translated to English and review notes added.
//package com.example.demo.config;
//
//import lombok.extern.slf4j.Slf4j;
//import org.apache.kafka.clients.consumer.ConsumerConfig;
//import org.apache.kafka.clients.producer.ProducerConfig;
//import org.apache.kafka.common.serialization.StringDeserializer;
//import org.apache.kafka.common.serialization.StringSerializer;
//import org.springframework.beans.factory.annotation.Value;
//import org.springframework.context.annotation.Bean;
//import org.springframework.context.annotation.Configuration;
//import org.springframework.context.annotation.DependsOn;
//import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
//import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
//import org.springframework.kafka.core.DefaultKafkaProducerFactory;
//import org.springframework.kafka.core.KafkaTemplate;
//import org.springframework.kafka.listener.ContainerProperties;
//
//import javax.annotation.Resource;
//import java.util.HashMap;
//import java.util.Map;
//
//@Slf4j
////@DependsOn(value = "logExceptionHandler")
//@Configuration
//public class KafkaFactory {
//
//    // NOTE(review): these injected fields are mutable instance fields, not
//    // constants — UPPER_SNAKE_CASE naming is misleading; lowerCamelCase would
//    // be conventional if this class is ever re-enabled.
//    @Value("${kafka.log.server:localhost:9092}")
//    private String BOOTSTRAP_SERVERS_CONFIG;
//    @Value("${kafka.log.enable.auto.commit:false}")
//    private boolean ENABLE_AUTO_COMMIT_CONFIG;
//    @Value("${kafka.log.auto.commit.interval.ms:1000}")
//    private Integer AUTO_COMMIT_INTERVAL_MS_CONFIG;
//    @Value("${kafka.log.session.timeout.ms:15000}")
//    private Integer SESSION_TIMEOUT_MS_CONFIG;
//    @Value("${kafka.log.fetch.max.wait.ms:500}")
//    private Integer FETCH_MAX_WAIT_MS_CONFIG;
//    @Value("${kafka.log.max.poll.records:100}")
//    private Integer MAX_POLL_RECORDS_CONFIG;
//    @Value("${kafka.log.concurrency:2}")
//    private Integer concurrency;
//    // NOTE(review): the default ':null' injects the LITERAL string "null" into a
//    // String field, not a Java null — StringUtils.hasText("null") is true, so the
//    // (disabled) seek logic below would not be skipped when the property is unset.
//    // An empty default (":") would be safer — TODO confirm intended behavior.
//    @Value("${kafka.log.partition.offset:null}")
//    private String partitionOffset;
//
////    @Resource
////    private LogExceptionHandler logExceptionHandler;
//
//    // Producer configuration: acks=1, 3 retries, 16 KiB batches, 1 ms linger,
//    // 32 MiB buffer, String key/value serializers.
//    @Bean
//    public Map<String, Object> productProps() {
//        Map<String, Object> props = new HashMap<>();
//        props.put(ProducerConfig.ACKS_CONFIG, "1");
//        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);
//        props.put(ProducerConfig.RETRIES_CONFIG, 3);
//        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16 * 1024);
//        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
//        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 32 * 1024 * 1024);
//        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
//        return props;
//    }
//
//    // Consumer configuration: manual commits by default, String deserializers.
//    @Bean
//    public Map<String, Object> consumerProps() {
//        Map<String, Object> props = new HashMap<>();
//        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);//"localhost:9092"
//        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, ENABLE_AUTO_COMMIT_CONFIG);//false
//        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, AUTO_COMMIT_INTERVAL_MS_CONFIG);// 1000 ms
//        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, SESSION_TIMEOUT_MS_CONFIG);// 15000 ms
//        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, FETCH_MAX_WAIT_MS_CONFIG);// max wait time for a single fetch
//        // Maximum number of records returned in a single poll
//        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, MAX_POLL_RECORDS_CONFIG);// 100 records
//        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
//        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
//        return props;
//    }
//
//    // NOTE(review): raw types are used throughout (DefaultKafkaProducerFactory,
//    // DefaultKafkaConsumerFactory, ConcurrentKafkaListenerContainerFactory);
//    // they should be parameterized <String, String> if this config is re-enabled.
//    @Bean(value = "kafkaTemplate")
//    public KafkaTemplate<String, String> kafkaTemplate() {
//        return new KafkaTemplate<>(new DefaultKafkaProducerFactory(productProps()));
//    }
//
//    @Bean(value = "defaultKafkaConsumerFactory")
//    public DefaultKafkaConsumerFactory defaultKafkaConsumerFactory() {
//        DefaultKafkaConsumerFactory defaultKafkaConsumerFactory = new DefaultKafkaConsumerFactory(consumerProps());
//        // NOTE(review): in the disabled seek logic below, consumer.assign(...) is
//        // called once PER partition inside the forEach, and each call REPLACES the
//        // previous assignment — only the last partition would stay assigned. A
//        // single assign with all TopicPartitions, followed by the seeks, appears
//        // to be the intent — TODO confirm before re-enabling.
////        Consumer consumer = defaultKafkaConsumerFactory.createConsumer();//"wisdom-log-group","wisdom-log"
////        // expected format: <topic>_<partition>-<offset>[,<partition>-<offset>...] e.g. log_0-1,1-2,2-10,3-20
////        log.info("partitionOffset==>" + partitionOffset);
////        if (StringUtils.hasText(partitionOffset)) {
////            String[] offset_arr = partitionOffset.split("_");
////            if (offset_arr.length == 2) {
////                String topic = offset_arr[0];
////                log.info("offset_arr[1]==>" + offset_arr[1]);
////                List<String[]> topic_partition = Arrays.stream(offset_arr[1].split(",")).map(x -> x.split("-")).collect(Collectors.toList());
////                log.info("topic_partition.size==>" + topic_partition.size());
////                topic_partition.forEach(x -> {
////                    log.info("x[0]:" + x[0] + ",x[1]:" + x[1]);
////                    int partition = Integer.parseInt(x[0]);
////                    long offset = Long.parseLong(x[1]);
////                    log.info("init==> topic:" + topic + ",partition:" + partition + ",offset:" + offset);
////                    consumer.assign(Arrays.asList(new TopicPartition(topic, partition)));
////                    consumer.seek(new TopicPartition(topic, partition), offset);
////                });
////            }
////        }
//        return defaultKafkaConsumerFactory;
//    }
//
//    @Bean(value = "logBatchContainerFactory")
//    public ConcurrentKafkaListenerContainerFactory logBatchContainerFactory() {
//        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
//        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
//        factory.setConsumerFactory(defaultKafkaConsumerFactory());
//        // Concurrency must be <= the topic's partition count (topics are created with an even partition count)
//        factory.setConcurrency(concurrency);
//        // Must be configured as a batch listener
//        factory.setBatchListener(true);
//        //factory.setBatchErrorHandler(logExceptionHandler);
//        return factory;
//    }
//
//    @Bean(value = "commonBatchContainerFactory")
//    public ConcurrentKafkaListenerContainerFactory commonBatchContainerFactory() {
//        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
//        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
//        factory.setConsumerFactory(new DefaultKafkaConsumerFactory(consumerProps()));
//        // Concurrency must be <= the topic's partition count (topics are created with an even partition count)
//        factory.setConcurrency(concurrency);
//        // Must be configured as a batch listener
//        factory.setBatchListener(true);
//        //factory.setBatchErrorHandler(logExceptionHandler);
//        return factory;
//    }
//
//    @Bean(value = "bigBatchContainerFactory")
//    public ConcurrentKafkaListenerContainerFactory bigBatchContainerFactory() {
//        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
//        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
//        factory.setConsumerFactory(new DefaultKafkaConsumerFactory(consumerProps()));
//        // Concurrency must be <= the topic's partition count (topics are created with an even partition count)
//        factory.setConcurrency(concurrency * 4);
//        // Must be configured as a batch listener
//        factory.setBatchListener(true);
//        //factory.setBatchErrorHandler(logExceptionHandler);
//        return factory;
//    }
//}