package com.cxz.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.util.Collection;
import java.util.Collections;
import java.util.Properties;

/**
 * Kafka通用配置.
 * @author
 * @since
 */
public class KafkaCommonProperties {

    /**
     * Kafka bootstrap server address ({@code host:port}).
     * NOTE(review): this is a hard-coded public IP; consider externalizing
     * it to configuration (env var / properties file) instead of source.
     */
    public static final String KAFKA_HOST = "8.155.22.194:9092";

    /**
     * Producer: number of acknowledgements the leader must receive before a
     * request is considered complete; controls durability of sent records.
     * <ul>
     *   <li>{@code "0"}: the producer does not wait for any acknowledgement;
     *       the retry setting has no effect.</li>
     *   <li>{@code "1"}: the leader writes the record to its log and responds
     *       without waiting for follower acknowledgement.</li>
     *   <li>{@code "all"}: the leader waits for the full set of in-sync
     *       replicas to acknowledge, guaranteeing the record is persisted on
     *       all replicas; equivalent to {@code "-1"}.</li>
     * </ul>
     */
    public static final String ACK = "all";

    /**
     * Producer: number of retries on transient send failures.
     */
    public static final Integer RETRY_TIMES = 1;

    /**
     * Producer: batch size in bytes for records sent to the same partition.
     * Default is 16384 bytes (16 KiB).
     */
    public static final Integer BATCH_SIZE = 16384;

    /**
     * Producer: delay in milliseconds to wait for additional records before
     * sending a batch (linger time).
     */
    public static final Integer LINGER_MS = 1;

    /**
     * Producer: total bytes of memory the producer may use for buffering.
     * Default is 33554432 bytes (32 MiB).
     */
    public static final Integer BUFFER_MEMORY = 33554432;

    /**
     * Producer: serializer class for record keys.
     */
    public static final String KEY_ENCODER = "org.apache.kafka.common.serialization.StringSerializer";

    /**
     * Producer: serializer class for record values.
     */
    public static final String VALUE_ENCODER = "org.apache.kafka.common.serialization.StringSerializer";

    /**
     * Consumer: consumer group ID used when subscribing to topics.
     */
    public static final String GROUP_ID = "my-group-id";

    /**
     * Consumer: whether offsets are committed automatically in the background.
     */
    public static final String AUTO_COMMIT = "true";

    /**
     * Consumer: interval in milliseconds between automatic offset commits;
     * only effective when {@code enable.auto.commit} is {@code true}.
     */
    public static final String AUTO_COMMIT_INTERVAL_MS = "1000";

    /**
     * Consumer: deserializer class for record keys.
     */
    public static final String KEY_DECODER = "org.apache.kafka.common.serialization.StringDeserializer";

    /**
     * Consumer: deserializer class for record values.
     */
    public static final String VALUE_DECODER = "org.apache.kafka.common.serialization.StringDeserializer";

    /**
     * Consumer: where to start consuming when no committed offset exists.
     * <ul>
     *   <li>{@code earliest}: reset to the earliest offset of the topic.</li>
     *   <li>{@code latest}: start consuming from the latest offset.</li>
     *   <li>{@code none}: throw an exception if no previous offset is found
     *       for the consumer group.</li>
     * </ul>
     * Any other value causes an exception.
     */
    public static final String AUTO_OFFSET_RESET = "latest";

    /**
     * Topic(s) to subscribe to.
     */
    public static final Collection<String> TOPIC = Collections.singleton("my-topic");

    /**
     * This class is a static configuration holder; instantiation is not
     * required. The public constructor is kept for backward compatibility.
     */
    public KafkaCommonProperties() {

    }

    /**
     * Builds the default Kafka producer configuration from the constants
     * declared in this class.
     *
     * @return a new {@link Properties} instance populated with producer settings
     */
    public static Properties getDefaultKafkaProducerConfig() {
        Properties properties = new Properties();

        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_HOST);
        properties.put(ProducerConfig.ACKS_CONFIG, ACK);
        properties.put(ProducerConfig.RETRIES_CONFIG, RETRY_TIMES);
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, BATCH_SIZE);
        properties.put(ProducerConfig.LINGER_MS_CONFIG, LINGER_MS);
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, BUFFER_MEMORY);
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, KEY_ENCODER);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, VALUE_ENCODER);

        return properties;
    }

    /**
     * Builds the default Kafka consumer configuration from the constants
     * declared in this class.
     *
     * @return a new {@link Properties} instance populated with consumer settings
     */
    public static Properties getDefaultKafkaConsumerConfig() {
        Properties properties = new Properties();

        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_HOST);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, GROUP_ID);
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, AUTO_COMMIT);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, AUTO_COMMIT_INTERVAL_MS);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, AUTO_OFFSET_RESET);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KEY_DECODER);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, VALUE_DECODER);

        return properties;
    }
}

