package app.kafka.produce;

import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import app.htby.utils.KafkaProduceUtils;

/**
 * Heartbeat-monitoring Kafka producer.
 *
 * <p>Lazily creates a single shared {@code KafkaProducer} and sends
 * heartbeat messages to the configured topic.
 *
 * @author lfy.xys
 * @date 2018-05-31
 */
public class HeartbeatProducer {

	private Producer<String, String> producer = null;

	/**
	 * 设置
	 * 
	 * @author lfy.xys
	 * @date 2018年5月31日
	 *
	 */
	public void setProducer() {
		Properties props = new Properties();
		props.put("bootstrap.servers", KafkaProduceUtils.bootstrapServers);
		props.put("acks", KafkaProduceUtils.acks);
		props.put("retries", KafkaProduceUtils.retries);
		props.put("batch.size", KafkaProduceUtils.batch_size);
		props.put("linger.ms", KafkaProduceUtils.linger_ms);
		props.put("buffer.memory", KafkaProduceUtils.buffer_memory);
		props.put("key.serializer", KafkaProduceUtils.key_serializer);
		props.put("value.serializer", KafkaProduceUtils.value_serializer);
		// 配置partitionner选择策略，可选配置
		// props.put("partitioner.class", "com.dx.SimplePartitioner2");
		producer = new KafkaProducer<String, String>(props);
	}

	public void produce(String topic, String key, String msg) {
		if (producer == null) {
			setProducer();
		}

		ProducerRecord<String, String> data = new ProducerRecord<String, String>(topic, key, msg);
		//发送数据
		producer.send(data, new Callback() {
			public void onCompletion(RecordMetadata metadata, Exception e) {
				if (e != null) {
					e.printStackTrace();
				} else {
					System.out.println("The offset of the record we just sent is: " + metadata.offset());
				}
			}
		});
	}

}
