package com.wang.producer;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;

/**
 * Demo producer that sends ten string records to topic {@code test1} and
 * prints the partition/offset of each acknowledged record via a send callback.
 * Failures are reported through the callback's exception argument.
 */
public class CallBackProducer {

	public static void main(String[] args) {

		// Producer configuration
		Properties properties = new Properties();
		// Kafka cluster to connect to
		properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop114:9092");
		// ACK level: wait for all in-sync replicas to acknowledge
		properties.put(ProducerConfig.ACKS_CONFIG, "all");
		// Maximum number of retries on transient send failures
		properties.put(ProducerConfig.RETRIES_CONFIG, 1);
		// Batch size: a batch is sent once it reaches 16 KB
		properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
		// Linger time: a batch is also sent after 1 ms even if not full
		properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);
		// Total send buffer memory: 32 MB
		properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
		// Key serializer
		properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
		// Value serializer
		properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
		// Custom partitioner
		properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.wang.partitioner.CustomPartitioner");

		// try-with-resources guarantees the producer is closed (flushing any
		// pending records) even if send() throws.
		try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
			for (int i = 0; i < 10; i++) {
				producer.send(new ProducerRecord<>("test1", Integer.toString(i), "hello world-" + i),
						(recordMetadata, e) -> {
							// Per the Callback contract, success/failure is signalled by the
							// exception argument: on failure the client may still pass a
							// non-null placeholder metadata (offset -1), so branching on
							// recordMetadata != null would silently swallow errors.
							if (e == null) {
								System.out.println(recordMetadata.partition() + "---" + recordMetadata.offset());
							} else {
								e.printStackTrace();
							}
						});
			}
		}
	}

}
