package com.leeyu.kafka.selfpartition;

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.Future;

/**
 * Demo producer showing Kafka's built-in partitioners.
 *
 * <p>Sends 10 string messages with the same key to {@code topic2} and prints
 * the partition/offset each record landed on, so the effect of the configured
 * partitioner is visible. Swap the commented-out {@code PARTITIONER_CLASS_CONFIG}
 * lines to try the alternative strategies.
 */
public class SysPartitionProducer {
    public static void main(String[] args) {
        // Producer configuration — use the type-safe ProducerConfig constants
        // consistently instead of raw string keys.
        Properties properties = new Properties();
        // Broker connection address
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Key/value serializers
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        // Partitioners shipped with Kafka:
        // Default partitioner (hashes the key; sticky behavior for null keys)
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, DefaultPartitioner.class);
        // Round-robin partitioner
        //properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RoundRobinPartitioner.class);
        // Uniform sticky partitioner
        //properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, UniformStickyPartitioner.class);

        // try-with-resources guarantees the producer is closed on every path
        // (KafkaProducer implements Closeable).
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 10; i++) {
                // Same key for every record, so partition choice is driven by
                // the configured partitioner.
                ProducerRecord<String, String> record =
                        new ProducerRecord<>("topic2", "key", "value" + i);
                Future<RecordMetadata> future = producer.send(record);
                // Block until the broker acknowledges so the metadata is available.
                RecordMetadata recordMetadata = future.get();
                if (recordMetadata != null) {
                    System.out.println("topic: " + recordMetadata.topic() + ": " + i + ", partition: " + recordMetadata.partition() + ", offset: " + recordMetadata.offset());
                }
                System.out.println("发送消息成功！");
            }
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of silently swallowing it.
            Thread.currentThread().interrupt();
            System.out.println("发送消息失败！");
            e.printStackTrace();
        } catch (Exception e) {
            System.out.println("发送消息失败！");
            e.printStackTrace();
        }
    }
}
