package com.flrjcx;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import sun.rmi.runtime.Log;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * kafka生产者程序
 *
 * @author Flrjcx
 */
public class ProducerKafka {
    /**
     * Entry point: configures a string/string producer against a single broker
     * and sends records to the "test" topic (asynchronously by default).
     *
     * @param args unused
     * @throws ExecutionException   if a synchronous send fails on the broker side
     * @throws InterruptedException if the thread is interrupted while sending/sleeping
     */
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // 1. Build the Kafka connection configuration.
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.14.157:9092");
        // acks=all: wait until the leader and all in-sync replicas acknowledge each record.
        props.put("acks", "all");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // 2. Create the producer. try-with-resources guarantees close() — which
        //    flushes any buffered records — runs even if a send throws.
        try (KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(props)) {
            // 3. Send data to the topic.
            // Synchronous send:
            // syncSendMsg(kafkaProducer);

            // Asynchronous send with a delivery callback:
            asyncSendMsg(kafkaProducer);
        }
        // 4. Producer is closed automatically by try-with-resources.
    }

    /**
     * Sends 100 records synchronously: each send blocks on {@code Future.get()}
     * before the next one, so records reach the broker strictly in production
     * order (at the cost of throughput).
     *
     * @param kafkaProducer an open producer; this method does NOT close it
     * @throws ExecutionException   if the broker rejects or fails a send
     * @throws InterruptedException if interrupted while waiting for an ack
     */
    public static void syncSendMsg(KafkaProducer<String, String> kafkaProducer) throws ExecutionException, InterruptedException {
        for (int i = 0; i < 100; i++) {
            // Build the record (topic "test", key "key<i>", value "<i>") and send it.
            Future<RecordMetadata> future = kafkaProducer.send(new ProducerRecord<>("test", "key" + i, i + ""));
            // Block until the broker acknowledges, preserving send order.
            future.get();
            System.out.println("第" + i + "条消息发送");
        }
    }

    /**
     * Sends records asynchronously with a callback that logs the delivery
     * metadata (topic / partition / offset) on success or the exception on
     * failure. One record is sent per second as demo pacing.
     *
     * <p>Lifecycle note: this method deliberately does NOT close the producer —
     * ownership stays with the caller ({@code main} closes it exactly once).
     *
     * @param kafkaProducer an open producer; this method does NOT close it
     * @throws ExecutionException   declared for signature compatibility; not thrown here
     * @throws InterruptedException if interrupted during the inter-send sleep
     */
    public static void asyncSendMsg(KafkaProducer<String, String> kafkaProducer) throws ExecutionException, InterruptedException {
        for (int i = 100000; i < 1000000; i++) {
            kafkaProducer.send(new ProducerRecord<String, String>("test", "flrjcx" + i, i + ""), new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Delivery failed (e.g. broker unreachable, record too large).
                        System.out.println("发送消息出现异常");
                        e.printStackTrace();
                    } else {
                        String topic = recordMetadata.topic();
                        int partition = recordMetadata.partition();
                        long offset = recordMetadata.offset();
                        System.out.println("消息发送到 kafka 中"+topic+"的主题,第"+partition+"分区," +
                                "第"+offset+"条数据");
                    }
                }
            });
            // Throttle: one record per second (demo pacing only).
            Thread.sleep(1000);
        }
        // Fixed: the old close() here made the producer be closed twice
        // (again in main). Closing is now solely the caller's responsibility.
    }
}
