import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

/**
 * @author lilulu
 * @date 2023/2/24 16:28
 */
/**
 * Minimal Kafka producer demo: sends the strings "1".."10" to topic {@code test01}.
 *
 * <p>Records are sent with no key and no explicit partition, so the sticky
 * partitioner (Kafka &gt;= 2.4 default) chooses the partition.
 */
public class KafkaProduce {
    public static void main(String[] args) {
        // 1. Producer configuration.
        Properties props = new Properties();
        props.put("bootstrap.servers", "node1:9092,node2:9092,node3:9092"); // Kafka broker list
        props.put("acks", "all"); // wait for full ISR acknowledgement (strongest durability)
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // 2. Create the producer and send data.
        //    try-with-resources guarantees close() — which also flushes buffered
        //    records — even if send() throws. (The original leaked the producer on
        //    exception and used raw types, losing compile-time type safety.)
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 1; i <= 10; i++) {
                // Partitioning alternatives for ProducerRecord:
                //   new ProducerRecord<>("test01", 2, key, value) -> explicit partition
                //   new ProducerRecord<>("test01", key, value)    -> hash(key) % numPartitions
                //   new ProducerRecord<>("test01", value)         -> sticky partitioning (used here)
                ProducerRecord<String, String> record =
                        new ProducerRecord<>("test01", Integer.toString(i));
                producer.send(record); // asynchronous; close() flushes pending sends
            }
        }
    }
}
