package app.kafka.produce;

import java.util.Properties;
import java.util.concurrent.Future;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Test producer that publishes messages to Kafka (DIS) topics.
 *
 * @author lfy.xys
 * @date 2018-05-31
 */
public class TestDisProducer {

	private Producer<String, String> producer = null;

	/**
	 * Lazily builds the underlying {@link KafkaProducer} with the test
	 * cluster configuration (hard-coded test broker, string key/value
	 * serializers, small linger for batching).
	 */
	public void setProducer() {
		Properties props = new Properties();
		props.put("bootstrap.servers", "192.168.3.40:9092");
		props.put("acks", "all");
		props.put("retries", 0);
		props.put("batch.size", 16384);
		props.put("linger.ms", 1);
		props.put("buffer.memory", 33554432);
		props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		// Optional: custom partitioner selection strategy.
		// props.put("partitioner.class", "com.dx.SimplePartitioner2");
		producer = new KafkaProducer<String, String>(props);
	}

	/**
	 * Asynchronously sends one record; the delivery outcome is reported via
	 * the callback. Records are buffered (batch.size/linger.ms), so callers
	 * must invoke {@link #close()} to guarantee delivery before JVM exit.
	 *
	 * @param topic destination topic
	 * @param key   record key (drives partition assignment)
	 * @param msg   record value
	 */
	public void produce(String topic, String key, String msg) {
		if (producer == null) {
			setProducer();
		}

		ProducerRecord<String, String> data = new ProducerRecord<String, String>(topic, key, msg);
		// The returned Future is intentionally ignored; the callback handles the result.
		producer.send(data, new Callback() {
			public void onCompletion(RecordMetadata metadata, Exception e) {
				if (e != null) {
					// No logging framework on the classpath; dump to stderr.
					e.printStackTrace();
				} else {
					System.out.println("The offset of the record we just sent is: " + metadata.offset());
				}
			}
		});
	}

	/**
	 * Flushes all buffered records and releases producer resources.
	 * Without this, batched/lingering records may be silently lost when
	 * the JVM exits (especially with retries=0).
	 */
	public void close() {
		if (producer != null) {
			producer.close();
			producer = null;
		}
	}

	public static void main(String[] args) {
		String key = "key";
		String msg = "111";
		String topic1 = "topic_1";
		String topic2 = "topic_2";
		TestDisProducer a = new TestDisProducer();
		try {
			a.produce(topic1, key, msg);
			System.out.println("发送了1");
			a.produce(topic2, key, msg);
			System.out.println("发送了2");
		} finally {
			// Ensure the async sends are actually flushed to the broker.
			a.close();
		}
	}
}
