package com.kl.example;

import com.kl.example.interceptor.ProducerInterceptorExample;
import com.kl.example.model.MessageModelExample;
import com.kl.example.serializer.MyMessageProtostuffSerializer;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class ProducerExample {

    private static final Logger logger = LoggerFactory.getLogger(ProducerExample.class);

    // All example messages are published to the 'test' topic.
    private static final String topic = "test";

    /**
     * Most basic message send: publishes five records and synchronously waits
     * for each broker acknowledgement, logging the resulting metadata.
     */
    @Test
    public void testBaseSend() {
        Properties props = initProducerProperties();
        // Additionally register a producer-side interceptor.
        props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptorExample.class.getName());
        // try-with-resources guarantees the producer is closed. The producer's buffer
        // pool holds records not yet sent to the server (a background I/O thread turns
        // them into requests); close() flushes that buffer, so nothing is lost.
        try (Producer<String, Object> producer = new KafkaProducer<>(props)) {
            // Send five records to the topic; the topic is a logical grouping on the broker.
            for (int i = 1; i <= 5; i++) {
                // Build the record: target topic plus key/value; the key determines the partition.
                ProducerRecord<String, Object> record =
                        new ProducerRecord<>(topic, "key-" + i, "msg-" + i * 100);
                // send() is asynchronous and returns a Future; get() blocks until the request
                // completes and yields the record metadata (or throws). Alternatively a
                // Callback can be passed to send() and Kafka invokes it with the response.
                Future<RecordMetadata> metadataFuture = producer.send(record);
                try {
                    RecordMetadata metadata = metadataFuture.get();
                    logger.info("topic = {}, partition = {}, offset = {}",
                            metadata.topic(), metadata.partition(), metadata.offset());
                } catch (InterruptedException e) {
                    // Restore the interrupt flag rather than swallowing it.
                    Thread.currentThread().interrupt();
                    logger.error("Interrupted while waiting for send acknowledgement", e);
                } catch (ExecutionException e) {
                    logger.error("Failed to send record {}", record, e);
                }
            }
            logger.info("All messages delivered");
        }
    }

    /**
     * Custom serializer test.
     * The consumer side must use the matching custom deserializer.
     */
    @Test
    public void testMySerializer() {
        Properties props = initProducerProperties();
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Option 1: JSON-based value serializer
        //props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, MyMessageJsonSerializer.class.getName());
        // Option 2: Protostuff-based value serializer
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, MyMessageProtostuffSerializer.class.getName());
        try (Producer<String, Object> producer = new KafkaProducer<>(props)) {
            for (int i = 1; i <= 5; i++) {
                ProducerRecord<String, Object> record = new ProducerRecord<>(topic, "key-" + i,
                        new MessageModelExample("test-" + i, "object-" + i));
                try {
                    RecordMetadata metadata = producer.send(record).get();
                    logger.info("topic = {}, partition = {}, offset = {}",
                            metadata.topic(), metadata.partition(), metadata.offset());
                } catch (InterruptedException e) {
                    // Restore the interrupt flag rather than swallowing it.
                    Thread.currentThread().interrupt();
                    logger.error("Interrupted while waiting for send acknowledgement", e);
                } catch (ExecutionException e) {
                    logger.error("Failed to send record {}", record, e);
                }
            }
            logger.info("All messages delivered");
        }
    }

    /**
     * Exercises the consumer-side TTL (message expiration) interceptor.
     * Sends three messages: the first and third carry a timestamp 10 seconds in
     * the past and should be filtered out by the consumer's TTL interceptor;
     * only the second (fresh) message should reach the consumer.
     */
    @Test
    public void testTTLInterceptor() {
        Properties props = initProducerProperties();
        // try-with-resources closes the producer (the original leaked it).
        try (Producer<String, Object> producer = new KafkaProducer<>(props)) {
            // 1. Simulate an already-expired message (timestamp 10s in the past).
            ProducerRecord<String, Object> record1 = new ProducerRecord<>(topic, 0,
                    System.currentTimeMillis() - 10 * 1000,
                    "key-1", "first-expire-data");
            producer.send(record1).get();

            // 2. A normal, fresh message.
            ProducerRecord<String, Object> record2 = new ProducerRecord<>(topic, 0,
                    System.currentTimeMillis(),
                    "key-2", "normal-expire-data");
            producer.send(record2).get();

            // 3. Another already-expired message.
            ProducerRecord<String, Object> record3 = new ProducerRecord<>(topic, 0,
                    System.currentTimeMillis() - 10 * 1000,
                    "key-3", "last-expire-data");
            producer.send(record3).get();

        } catch (InterruptedException e) {
            // Restore the interrupt flag rather than swallowing it.
            Thread.currentThread().interrupt();
            logger.error("Interrupted while sending TTL test messages", e);
        } catch (ExecutionException e) {
            logger.error("Failed to send TTL test message", e);
        }
    }

    /**
     * Builds the minimal producer configuration (bootstrap servers, client id,
     * key/value serializers) plus a retry count.
     * Each key has a corresponding constant in {@link ProducerConfig}.
     *
     * @return a fresh {@link Properties} instance ready for {@link KafkaProducer}
     */
    public static Properties initProducerProperties() {
        Properties props = new Properties();
        // Multiple brokers may be listed, separated by commas.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Producer client id; defaults to producer-1, producer-2, ... when unset.
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "producer.client.id.demo");
        // key=key.serializer
        // value=org.apache.kafka.common.serialization.StringSerializer
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Number of retries when a send fails.
        props.put(ProducerConfig.RETRIES_CONFIG, 3);
        return props;
    }

}