/*
* 四川生学教育科技有限公司
* Copyright (c) 2015-2025 Founder Ltd. All Rights Reserved.
*
* This software is the confidential and proprietary information of
* Founder. You shall not disclose such Confidential Information
* and shall use it only in accordance with the terms of the agreements
* you entered into with Founder.
*
*/
package cn.demo.kafka.demo2;

import org.apache.kafka.clients.producer.*;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * @author WUANG (wa@sxw.cn)
 * @description Producer
 * @date 2018/11/8 11:28
 * @slogon 站在巨人的肩膀上
 * @since 2.0.0
 */
public class Producer extends Thread {

    public static final String TOPIC_NAME = "test2";

    private final KafkaProducer<Integer, String> producer;
    private final String topic;
    // Whether records are sent asynchronously (fire-and-forget with a callback)
    // or synchronously (blocking on the returned future).
    private final Boolean isAsync;
    // IP address of the host running the Kafka broker.
    private final String serverIp = "120.78.169.174";

    /**
     * Builds a producer for the given topic.
     *
     * @param topic   topic the records are sent to
     * @param isAsync true for asynchronous sends with {@link DemoCallBack},
     *                false for synchronous (blocking) sends
     */
    public Producer(String topic, Boolean isAsync) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, serverIp + ":9092");
        // client.id: an identifier string sent to the broker with every request. It exists
        // purely so the server can trace the request source (logging / quota matching);
        // it has no other functional effect and may be any string.
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "test-consumer-group");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // server.properties sets num.partitions=6, so newly created topics get 6 partitions;
        // the custom partitioner below decides which partition each record is routed to.
        // See https://www.cnblogs.com/shijiaoyun/p/6281580.html for background.
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, PartitionUtil.class.getName());
        producer = new KafkaProducer<>(props);
        this.topic = topic;
        this.isAsync = isAsync;
    }

    /**
     * Sends an auto-numbered "Message_N" record every 5 seconds until the thread is
     * interrupted. In async mode the broker acknowledgement is reported by
     * {@link DemoCallBack}; in sync mode the loop blocks on {@code get()} per record.
     */
    @Override
    public void run() {
        int messageNo = 1;
        try {
            // Exit cleanly once an interrupt has been observed (restored by sleep()
            // or by the sync-send catch block below).
            while (!Thread.currentThread().isInterrupted()) {
                sleep(5);
                String messageStr = "Message_" + messageNo;
                long startTime = System.currentTimeMillis();
                if (isAsync) {
                    producer.send(new ProducerRecord<Integer, String>(topic,/* 5,*/
                            messageNo,
                            messageStr), new DemoCallBack(startTime, messageNo, messageStr));
                } else {
                    try {
                        producer.send(new ProducerRecord<Integer, String>(topic,
                                messageNo,
                                messageStr)).get();
                        System.out.println("Sent message: (" + messageNo + ", " + messageStr + ")");
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag so the loop condition sees it and exits.
                        Thread.currentThread().interrupt();
                    } catch (ExecutionException e) {
                        e.printStackTrace();
                    }
                }
                ++messageNo;
            }
        } finally {
            // Release the producer's network/IO resources and flush buffered records.
            producer.close();
        }
    }

    /**
     * Sleeps for the given number of seconds. On interruption, re-asserts the
     * interrupt flag so the caller's loop can observe it and terminate.
     */
    private void sleep(int x) {
        try {
            // Widen to long before multiplying to avoid int overflow for large values.
            Thread.sleep(x * 1000L);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    public static void main(String[] args) {
        // Starting the producer thread sends "Message_N" records to the topic
        // on the configured Kafka broker.
        boolean isAsync = true;
        Producer producerThread = new Producer(TOPIC_NAME, isAsync);
        producerThread.start();
    }
}

class DemoCallBack implements Callback {
    private long startTime;
    private int key;
    private String message;

    public DemoCallBack(long startTime, int key, String message) {
        this.startTime = startTime;
        this.key = key;
        this.message = message;
    }

    /**
     * 当异步发送完成后需要进行的处理
     **/
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        long elapsedTime = System.currentTimeMillis() - startTime;
        if (metadata != null) {
            System.out.println(
                    "message(" + key + ", " + message + ") sent to partition(" + metadata.partition() +
                            "), " +
                            "offset(" + metadata.offset() + ") in " + elapsedTime + " ms");
        } else {
            exception.printStackTrace();
        }
    }
}