package org.poem.kafka;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.poem.enums.KafkaEnums;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.util.Properties;
import java.util.concurrent.Future;

@Component
public class KafkaProducer {

    public static final Logger logger = LoggerFactory.getLogger(KafkaProducer.class);
    private static Producer<String, String> producer;
    private static Properties properties;

    @Value("${kafka.bootstrap-servers.address}")
    private String address;

    /**
     * Builds the shared producer once the Spring context has injected the
     * bootstrap-server address, and registers a JVM shutdown hook that
     * flushes and closes the producer so buffered records are not lost.
     */
    @PostConstruct
    public void init() {
        if (properties == null) {
            properties = new Properties();
            properties.put("bootstrap.servers", address);
            // Wait for the full ISR to acknowledge each record.
            // NOTE(review): the legacy "request.required.acks" property was removed —
            // it is ignored by modern clients and contradicted acks=all.
            properties.put("acks", "all");
            properties.put("retries", 0);              // number of retries on transient send failure
            properties.put("batch.size", 2000);        // per-partition batch size in bytes
            properties.put("linger.ms", 1);            // small linger so records can batch into one request
            properties.put("buffer.memory", 33554432); // total buffer memory for unsent records (32 MiB)
            // Allow very large records (~100 MB). The former "max.partition.fetch.bytes"
            // (consumer-only) and "message.max.bytes" (broker/topic-only) entries were
            // removed: the producer ignores them and only logs config warnings.
            properties.put("max.request.size", 100000000);
            properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        }
        producer = new org.apache.kafka.clients.producer.KafkaProducer<String, String>(properties);
        // Register a shutdown hook so the JVM flushes buffered records on exit.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                logger.info("Shutting down Kafka producer...");
                producer.flush();
                producer.close();
            }
        });
    }

    /**
     * Sends a message asynchronously to the data-transform topic.
     * Delivery results are logged from the producer callback; failures are
     * logged and swallowed (fire-and-forget semantics).
     *
     * @param value the message payload, published under the fixed topic/key
     *              from {@link KafkaEnums}
     */
    public void sendMessage(String value) {
        try {
            ProducerRecord<String, String> record = new ProducerRecord<String, String>(
                    KafkaEnums.DATA_TRANSFORM_TOP, KafkaEnums.DATA_TRANSFORM_TOP_KEY, value);
            producer.send(record, new Callback() {

                @Override
                public void onCompletion(RecordMetadata metadata, Exception e) {
                    if (e != null) {
                        // metadata is null when the send failed — return before
                        // dereferencing it (the original code NPE'd here).
                        logger.error("send record error", e);
                        return;
                    }
                    logger.info("offset: {}, partition: {}", metadata.offset(), metadata.partition());
                }
            });
        } catch (Exception e) {
            logger.error("failed to send message", e);
        }
    }
}

