package com.zc.imooc.finaltest.controller;

import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.kafka.clients.producer.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import javax.servlet.http.HttpServletRequest;
import java.util.Properties;
import java.util.concurrent.Future;

/**
 * Kafka producer demo controller.
 *
 * @program: final-test
 * @description: REST endpoint for producing Kafka records, plus a standalone demo of the send styles
 * @author: zengchen
 * @create: 2020-09-21 09:36
 **/
@RestController
@RequestMapping("producer")
public class ProducerController {

    /** Topic every record produced by this class is written to. */
    private static final String TOPIC_NAME = "realNewsTopic";

    /*
     * KafkaProducer is thread-safe: share ONE instance across threads instead
     * of creating one per request, otherwise heavy context switching and lock
     * contention degrade throughput.
     *
     * The record key matters:
     *  - it drives partition assignment (load balancing across partitions);
     *  - a well-designed key lets downstream stream processors (Flink,
     *    Spark Streaming, ...) partition and process records efficiently.
     *
     * acks=all makes the broker wait for the full ISR before acknowledging
     * (durability), but it is NOT by itself an exactly-once guarantee — that
     * additionally requires idempotence/transactions. To really avoid losing
     * data, handle send failures explicitly (see the catch below).
     */
    @Autowired
    private Producer<String, String> producer; // parameterized (was raw type) so send() is type-checked

    /**
     * Publishes one record built from the {@code key} and {@code message}
     * request parameters (either may be null when the parameter is absent).
     *
     * @param request HTTP request carrying {@code key} and {@code message}
     * @return the message text that was submitted for sending
     */
    @GetMapping(value = "sendSomething")
    public Object sendSomething(HttpServletRequest request) {
        String key = request.getParameter("key");
        String message = request.getParameter("message");
        ProducerRecord<String, String> producerRecord = new ProducerRecord<>(TOPIC_NAME, key, message);
        try {
            producer.send(producerRecord, (metadata, exception) -> {
                if (exception != null) {
                    // Broker-side failure reported asynchronously — must not be ignored.
                    System.err.println("send failed for key=" + key + ": " + exception);
                } else {
                    System.out.println(key + ":" + message + "  " + ToStringBuilder.reflectionToString(metadata));
                }
            });
        } catch (Exception e) {
            // TODO: push the record onto a retry queue (Redis, ES, ...) and
            // resend on a schedule. Never swallow silently — at least log it.
            System.err.println("send failed for key=" + key + ": " + e);
        }
        return message;
    }

    /**
     * Standalone demo: builds a producer against a local broker and sends ten
     * records using the callback style. Toggle the commented calls to try the
     * other send styles.
     */
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "centos7-out:9092");
        properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // Wait for the full in-sync replica set to acknowledge each record.
        properties.setProperty(ProducerConfig.ACKS_CONFIG, "all");
        // NOTE(review): retries=0 means any transient broker error loses the
        // record despite acks=all — consider retries>0 (plus idempotence).
        properties.setProperty(ProducerConfig.RETRIES_CONFIG, "0");
        properties.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        properties.setProperty(ProducerConfig.LINGER_MS_CONFIG, "1");
        properties.setProperty(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
//        properties.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.zc.imooc.finaltest.kafka.producerClient.MyPartition"); // custom partitioner
        // try-with-resources guarantees close() (and a final flush) even if a
        // send helper throws — the original leaked the producer on exceptions.
        try (Producer<String, String> producer = new KafkaProducer<>(properties)) {
//            sendAsyncMessage(producer);             // async fire-and-forget
//            sendSyncMessage(producer);              // blocking send
            sendCallbackMessage(producer);            // callback send
//            sendCallbackPartitionMessage(producer); // custom-partition callback send
        }
    }

    /**
     * Sends ten keyed records and blocks on each send so the callbacks fire
     * in order; intended for use with a custom partitioner (see the commented
     * PARTITIONER_CLASS_CONFIG in {@link #main}).
     */
    private static void sendCallbackPartitionMessage(Producer<String, String> producer) throws Exception {
        for (int i = 0; i < 10; i++) {
            String key = "key_callback_partition_" + i;
            String value = "value_callback_partition_" + i;
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(TOPIC_NAME, key, value);
            Future<RecordMetadata> future = producer.send(producerRecord, (metadata, exception) -> {
                if (exception != null) {
                    System.err.println("send failed for key=" + key + ": " + exception);
                } else {
                    System.out.println(key + ":" + value + "  " + ToStringBuilder.reflectionToString(metadata));
                }
            });
            future.get(); // block until acknowledged so output stays ordered
        }
    }

    /** Sends ten keyed records asynchronously; a callback prints each result. */
    private static void sendCallbackMessage(Producer<String, String> producer) throws Exception {
        for (int i = 0; i < 10; i++) {
            String key = "key_callback_" + i;
            String value = "value_callback_" + i;
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(TOPIC_NAME, key, value);
            // Fire-and-forget: the returned Future was unused in the original, so drop it.
            producer.send(producerRecord, (metadata, exception) -> {
                if (exception != null) {
                    System.err.println("send failed for key=" + key + ": " + exception);
                } else {
                    System.out.println(key + ":" + value + "  " + ToStringBuilder.reflectionToString(metadata));
                }
            });
        }
    }

    /** Sends ten records, blocking on each Future — the synchronous style. */
    private static void sendSyncMessage(Producer<String, String> producer) throws Exception {
        for (int i = 0; i < 10; i++) {
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(TOPIC_NAME, "key_sync_" + i, "value_sync_" + i);
            Future<RecordMetadata> send = producer.send(producerRecord);
            RecordMetadata recordMetadata = send.get(); // blocks until the broker acks
            System.out.println(ToStringBuilder.reflectionToString(recordMetadata));
        }
    }

    /** Sends ten records fire-and-forget: no Future consumed, no callback. */
    private static void sendAsyncMessage(Producer<String, String> producer) throws Exception {
        for (int i = 0; i < 10; i++) {
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(TOPIC_NAME, "key_" + i, "value_" + i);
            producer.send(producerRecord);
        }
    }
}
