package com.heima.test.kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.junit.Test;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class ProducerQuickStart {
    // Kafka producer quick-start examples (synchronous fire-and-forget and
    // async-configured bulk send). Broker address is hard-coded for the demo.

    /**
     * Sends a single message to the {@code order} topic.
     * Uses try-with-resources so the producer is always closed — closing
     * flushes buffered records; without it the message may never be sent.
     */
    @Test
    public void sendMsg() throws ExecutionException, InterruptedException {
        // Kafka producer configuration
        Properties properties = new Properties();

        // broker host:port
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "39.107.100.177:9092");

        // key serializer
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // value serializer
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // record to send: topic "order", key "hello", value "world"
        ProducerRecord<String, String> record = new ProducerRecord<>("order", "hello", "world");

        // try-with-resources guarantees close() runs even if send() throws;
        // close() flushes any buffered messages before releasing the connection
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            producer.send(record);
        }
    }

    /**
     * Sends 1000 copies of the same record with delivery-tuning options set
     * (acks, retries, compression). Sends are asynchronous; close() at the
     * end of the try-with-resources block flushes them.
     */
    @Test
    public void sendMsgAsync() throws ExecutionException, InterruptedException {
        // Kafka producer configuration
        Properties properties = new Properties();

        // broker host:port
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "39.107.100.177:9092");

        // key serializer
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // value serializer
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // acks=1: the partition leader must acknowledge the write before the
        // producer treats it as successful; tune per business requirements
        properties.put(ProducerConfig.ACKS_CONFIG, "1");

        // number of times the producer retries on a transient send error
        properties.put(ProducerConfig.RETRIES_CONFIG, 10);

        // message compression codec; snappy is a common low-CPU choice
        properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");

        // record to send: topic "order", key "hello", value "world"
        ProducerRecord<String, String> record = new ProducerRecord<>("order", "hello", "world");

        // try-with-resources guarantees close() runs, which flushes all
        // asynchronously buffered sends before releasing the connection
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 1000; i++) {
                producer.send(record);
            }
        }
    }

}
