package com.dsh.kafaka;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.TimeUnit;


/**
 * 不能直接关机，会导致zk的 myid 问题
 * <p>
 * <p>
 * kafka 后台启动命令
 * bin/kafka-server-start.sh -daemon config/server.properties
 * 生产者
 * bin/kafka-console-producer.sh --broker-list node1:9092 --topic test
 * 消费者
 * bin/kafka-console-consumer.sh --bootstrap-server node1:9092 --topic test --from-beginning
 * 创建主题
 * bin/kafka-topics.sh --create --bootstrap-server node1:9092 --topic test
 * 查看列表
 * bin/kafka-topics.sh --list --bootstrap-server node1:9092
 * <p>
 * <p>
 * <p>
 * hive
 * nohup bin/hive --service metastore >> logs/metastore.log 2>&1 &
 * nohup bin/hive --service hiveserver2 >> logs/hiveserver2.log 2>&1 &
 */
public class KafkaProducerTest2 {
    /**
     * Produces records {@code 100000..999999} to topic {@code test}, one per second,
     * using asynchronous sends with a completion callback.
     *
     * @param args unused
     * @throws Exception if interrupted while sleeping between sends
     */
    public static void main(String[] args) throws Exception {

        // Producer connection configuration.
        Properties props = new Properties();
        props.put("bootstrap.servers", "node1:9092");
        // "all": wait until the full in-sync replica set acknowledges each record.
        props.put("acks", "all");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        int max = 1000000;
        // try-with-resources guarantees the producer is flushed and closed even if the
        // send loop throws (the original closed manually and could leak the producer).
        try (KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(props)) {
            // Asynchronous sends: the callback fires when the broker acknowledges
            // (or the send ultimately fails).
            for (int i = 100000; i < max; ++i) {
                ProducerRecord<String, String> producerRecord =
                        new ProducerRecord<>("test", null, i + "_消息");
                kafkaProducer.send(producerRecord, new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception == null) {
                            // Ack received: report where the record landed.
                            System.out.println("topic:" + metadata.topic()
                                    + " offset:" + metadata.offset()
                                    + " partition:" + metadata.partition());
                        } else {
                            // Do not swallow the failure (original printed the literal 111).
                            System.err.println("send failed: " + exception);
                        }
                    }
                });
                // Throttle to one record per second.
                TimeUnit.SECONDS.sleep(1);
            }
        }
    }
}
