package com.kafka.producer;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Demo Kafka producer: sends ten keyed String records to topic {@code hello2},
 * printing the broker-acknowledged {@link RecordMetadata} from an async callback.
 *
 * <p>Configuration (bootstrap.servers, key/value serializers, acks, ...) is
 * loaded from {@code producer.properties} on the classpath.
 */
public class Producer {

	public static void main(String[] args) throws IOException, InterruptedException, ExecutionException {

		// 1. Load producer configuration from the classpath.
		Properties properties = new Properties();

		// try-with-resources closes the stream; the explicit null check turns a
		// missing resource into a clear error instead of a bare NullPointerException.
		try (InputStream config = Producer.class.getClassLoader().getResourceAsStream("producer.properties")) {
			if (config == null) {
				throw new IOException("producer.properties not found on the classpath");
			}
			properties.load(config);
		}

		// 2. Send data. try-with-resources guarantees producer.close() runs even if
		// a send throws; close() also flushes any records still buffered client-side.
		try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {

			for (int i = 0; i < 10; i++) {

				producer.send(new ProducerRecord<>("hello2", Integer.toString(i), "value" + i),

						// Callback runs asynchronously once the broker acks (or the send fails).
						new Callback() {

							@Override
							public void onCompletion(RecordMetadata metadata, Exception exception) {

								if (exception == null) {
									System.out.println(metadata);
								} else {
									// Surface send failures instead of swallowing them silently.
									System.err.println("Send failed: " + exception);
								}
							}
						});

				// To make the send synchronous (wait for the ack before sending the next
				// record), capture the returned Future and call get():
				//   RecordMetadata recordMetadata = producer.send(...).get();

				System.out.println(String.format("发完了%s条", i));
			}
		}

		// 3. Producer is closed automatically by try-with-resources above.
	}

}
