package com.yonyou.findata.kafka.newapi;

import com.yonyou.findata.kafka.KafkaProperties;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * The new (java-client) producer is thread-safe: a single producer instance
 * can be shared across multiple threads.
 *
 * @author pizhihui
 * @date 2017-11-08
 */
public class ProducerCallbackDemo {

    private static final Logger logger = LoggerFactory.getLogger(ProducerCallbackDemo.class);

    /** Number of demo records sent in one run. */
    private static final int MESSAGE_COUNT = 100;

    public static void main(String[] args) {

        Properties props = new Properties();
        // Use ProducerConfig constants consistently instead of raw string keys
        // (the original mixed both styles).
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaProperties.BROKER_LOCAL);
        props.put(ProducerConfig.ACKS_CONFIG, "all"); // ack level: wait for the full ISR to acknowledge
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384); // batch size; a full batch is flushed out of buffer.memory
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1); // max wait before sending a not-yet-full batch
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432); // 32 MB RecordAccumulator buffer size

        // key/value serializer classes
        //props.put("partitioner.class", "com.cuicui.kafkademon.javaclient.JPartitioner");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        props.put(ProducerConfig.RETRIES_CONFIG, 10);

        long start = System.currentTimeMillis();

        // try-with-resources guarantees the producer is closed — flushing all
        // buffered records — even if a send() throws. The original called
        // close() unconditionally after the loop and could leak the producer.
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // send MESSAGE_COUNT records
            for (int i = 0; i < MESSAGE_COUNT; i++) {
                ProducerRecord<String, String> record =
                        new ProducerRecord<>("my-topic", Integer.toString(i), Integer.toString(i));
                // send with a completion callback:
                //Future<RecordMetadata> future = producer.send(record, callBack);
                // fire-and-forget send:
                producer.send(record);
            }
            logger.info("Time used:{}", System.currentTimeMillis() - start);
        }
    }

    /**
     * Send-completion callback. Exactly one of {@code metadata} /
     * {@code exception} is non-null: on failure {@code metadata} is null, so
     * it must be guarded before the reflective toString —
     * commons-lang3 ReflectionToStringBuilder throws on a null input, which
     * would mask the very error the callback is meant to report.
     */
    private static Callback callBack = new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            String meta = metadata == null
                    ? "null"
                    : ToStringBuilder.reflectionToString(metadata, ToStringStyle.SHORT_PREFIX_STYLE);
            logger.debug("onCompletion, metadata:{}, exception:{}", meta, exception);
        }
    };

    /** Lambda form of the same callback, with the same null-metadata guard. */
    private static Callback callback2 = (metadata, exception) -> {
        String meta = metadata == null
                ? "null"
                : ToStringBuilder.reflectionToString(metadata, ToStringStyle.SHORT_PREFIX_STYLE);
        logger.debug("onCompletion, metadata:{}, exception:{}", meta, exception);
        System.out.println("hello");
    };


}
