package com.boot.config;


import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.StreamsConfig;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

//@EnableKafka
@Configuration
public class KafkaConfig {
    // Kafka bootstrap servers (comma-separated host:port list).
    // Examples: "localhost:9092" or "hadoop-1:9092,hadoop-2:9092,hadoop-3:9092".
    // NOTE(review): consider externalizing via @Value / application.properties instead of hard-coding.
    public String kafkaServer = "hadoop-1:9092";

    /*********kafka producer************/

    /**
     * Shared producer configuration: String key/value serializers with LZ4 compression.
     *
     * @return producer {@link Properties} used by both {@code kafkaTemplate} and {@code producer}
     */
    @Bean
    public Properties properties() {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaServer); // Kafka cluster hosts.
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, "spring-boot-demo"); // Logical client id (not a consumer group id).
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");
        return properties;
    }


    //    @Bean
//    public NewTopic initialTopic() {
//        return new NewTopic(this.billingTopic, 16, (short) 2);
//    }

    /**
     * Spring's admin wrapper; used by the framework for topic auto-creation
     * and as the config source for {@link #adminClient()}.
     */
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaServer);
        return new KafkaAdmin(props);
    }

    /**
     * Raw Kafka {@link AdminClient} sharing the {@link KafkaAdmin} configuration.
     * The self-invocation of {@code kafkaAdmin()} is safe here because
     * {@code @Configuration} classes are CGLIB-proxied (same singleton is returned).
     */
    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(this.kafkaAdmin().getConfigurationProperties());
    }

    /**
     * KafkaTemplate for sending String records.
     *
     * @param properties the shared producer config bean
     * @return a template backed by a typed {@link DefaultKafkaProducerFactory}
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate(@Qualifier("properties") Properties properties) {
        // DefaultKafkaProducerFactory expects Map<String, Object>; copy the Properties
        // so the typed (diamond) constructor can be used instead of a raw type.
        Map<String, Object> config = new HashMap<>();
        properties.forEach((k, v) -> config.put(String.valueOf(k), v));
        ProducerFactory<String, String> producerFactory = new DefaultKafkaProducerFactory<>(config);
        return new KafkaTemplate<>(producerFactory);
    }

    /**
     * Plain Kafka producer for code that bypasses KafkaTemplate.
     * Spring closes it on shutdown ({@code Producer} is {@code Closeable}).
     */
    @Bean
    public Producer<String, String> producer(@Qualifier("properties") Properties properties) {
        return new KafkaProducer<>(properties);
    }


    /**
     * Baseline consumer config shared by listener container factories.
     *
     * @param group consumer group id
     * @return mutable config map; callers may override individual entries
     */
    private Map<String, Object> buildConsumerConfig(String group) {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaServer);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, group);
        propsMap.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 8 * 1024 * 1024);
        propsMap.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 5_000);
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 90_000);
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);
        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 60_000);
        return propsMap;
    }


    /**
     * Builds a concurrent listener container factory (3 consumer threads)
     * from the given consumer config.
     */
    private ConcurrentKafkaListenerContainerFactory<String, String> containerFactory(Map<String, Object> consumerConfig) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        //factory.setRetryTemplate(this.retryTemplate());
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfig));
        factory.setConcurrency(3);
        // Enabling this forces listeners to be batch message listeners.
//        factory.setBatchListener(true);
//        factory.setErrorHandler(new RetryErrorHandler());
        return factory;
    }


    /**
     * Default listener container factory ("total-group"): auto-commit,
     * one record per poll, tighter poll/session timing than the baseline.
     * Note: heartbeat.interval.ms (10s) should stay below 1/3 of
     * session.timeout.ms (32s); 10s is right at that boundary.
     */
    @Bean(name = "kafkaListenerContainerFactory")
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        Map<String, Object> config = this.buildConsumerConfig("total-group");
        config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1);
        config.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 30_000);
        config.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 10_000);
        config.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 32_000);
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        ConcurrentKafkaListenerContainerFactory<String, String> factory = this.containerFactory(config);
        factory.setConcurrency(3); // redundant (containerFactory already sets 3) but harmless; kept explicit.
        return factory;
    }


    /*********kafka stream************/
    //http://kafka.apache.org/26/documentation/streams/developer-guide/config-streams.html

    /**
     * Kafka Streams configuration.
     *
     * @return streams {@link Properties} for building a {@code KafkaStreams} instance
     */
    @Bean
    public Properties streamProperties() {
        Properties props = new Properties();

        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaServer);
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // NOTE(review): group.id is ignored by Kafka Streams (it derives the group from
        // application.id); kept for documentation purposes — verify it causes no warning.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "kafka-streams-app");
        // To persist state, RocksDB flushes state-store contents to the directory given by
        // StreamsConfig.STATE_DIR_CONFIG.
        // For disaster recovery and scalability, state-store contents are also backed up by
        // default to a changelog topic named <application_id>-<state_store_name>-changelog;
        // this can be toggled via enableLogging/disableLogging.
        props.put(StreamsConfig.STATE_DIR_CONFIG, "/opt/app/logs/state-store");
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 60_000);
        props.put(StreamsConfig.POLL_MS_CONFIG, 60_000);
        //at_least_once, exactly_once
        // Kafka 0.11.0.0 officially supports exactly-once semantics (EOS). Kafka's EOS has
        // three aspects:
        // - Idempotent producer: guarantees a message sent to a single partition is written
        //   only once. Set enable.idempotence=true in the producer, and do NOT set
        //   transactional.id at all (not even to "" or "null").
        // - Transactions: guarantee atomic writes across multiple partitions (all succeed or
        //   all roll back). Set transactional.id to a meaningful string and
        //   enable.idempotence=true in the producer.
        // - Stream-processing EOS: stream processing is a "read-process-write" pipeline; EOS
        //   makes the whole pipeline atomic. Applies only to Kafka Streams — set
        //   processing.guarantee=exactly_once in the Streams app.
        // props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
        // Streams apps create changelog/repartition topics for internal state; this sets
        // the replication factor of those internal topics.
        props.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 2);
        // Time (ms) to wait before deleting state after a partition migration.
        props.put(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG, 120_000);
        //props.put(StreamsConfig.producerPrefix(ProducerConfig.ACKS_CONFIG), "1");
        // Event-time extractor:
        //props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, StatictisTimestampExtractor.class);
        // props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksDBConfigSetter.class);

        // Global parameters.
        // Disabling caching can cause heavy write traffic to the underlying RocksDB store.
        // By default Kafka Streams caching is enabled and the RocksDB cache is disabled; if
        // you turn off the Streams cache, enable the RocksDB cache to avoid high write load.
        // (Was previously set twice — once to 0, then overwritten to 10 MB; the single
        // effective value is kept here.)
        props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 10 * 1024 * 1024);
        // Commit interval. When PROCESSING_GUARANTEE_CONFIG is EXACTLY_ONCE,
        // COMMIT_INTERVAL_MS_CONFIG is fixed to EOS_DEFAULT_COMMIT_INTERVAL_MS;
        // otherwise it defaults to 100 ms.
        props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);


        // Extra retention for expired window changelog segments (broker default is 1 day).
        // Fixed typo: was 12 * 6060 * 1000 (~20.2 h); intended 12 hours.
        props.put(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, 12 * 60 * 60 * 1000);
        return props;
    }


}
