package top.zpengblog.kafkaprogram.producer;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.util.Properties;

/**
 * @author linzp
 * @version 1.0.0
 * CreateDate 2020/11/19 19:35
 */
@Service
@Slf4j
public class KafKaProducer {
    /**
     * 生产者.
     */
    private KafkaProducer<String, String> producer;

    /**
     * 结点.
     */
    @Value("${kafka.servers}")
    private String servers;

    /**
     * topic.
     */
    @Value("${kafka.topic}")
    private String topic;

    /**
     * 初始化.
     */
    @PostConstruct
    private void init() {
        Properties props = new Properties();
        props.put("bootstrap.servers", servers);
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // 设置分区类,根据key进行数据分区
        producer = new KafkaProducer<String, String>(props);
        log.info("kafka 生产者启动成功！！！！！！！");
    }

    /**
     * 发送消息.
     *
     * @param data
     */
    public void produce(String data) {
        producer.send(new ProducerRecord<String, String>(topic, data));
    }
}
