package com.zero.kafka.producer;

import com.alibaba.fastjson.JSON;
import com.zero.kafka.pojo.Order;
import com.zero.kafka.utils.InitKafkaConfig;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

/**
 * Demo Kafka producer that sends a fixed batch of {@link Order} messages,
 * either asynchronously (callback-based) or synchronously (blocking on the
 * returned Future), then closes the producer.
 *
 * <p>Runs as a {@link Thread}; see {@link #main(String[])} for usage.
 */
public class MyKafkaProducer extends Thread {

    /** Topic the orders are published to. */
    private final String topic;
    /** true = async send with callback; false = blocking send via Future.get(). */
    private final boolean isAsync;
    private final KafkaProducer<String, String> producer;

    /**
     * @param topic   destination topic name
     * @param isAsync send mode: true for async (callback), false for sync (blocking)
     */
    public MyKafkaProducer(String topic, boolean isAsync) {
        Properties properties = InitKafkaConfig.initProducerConfig();
        this.topic = topic;
        this.isAsync = isAsync;
        this.producer = new KafkaProducer<String, String>(properties);
    }

    @Override
    public void run() {
        int msgNum = 5;
        // Counted down once per message in BOTH modes, so await() below returns
        // as soon as all sends complete instead of always hitting the timeout.
        final CountDownLatch countDownLatch = new CountDownLatch(msgNum);
        for (int i = 1; i <= msgNum; i++) {
            Order order = new Order(i, 100 + i, 1, 1000.00);

            // 1. Explicitly target partition 0:
            //ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, 0, order.getOrderId().toString(), JSON.toJSONString(order));

            // 2. No partition given: the broker-side default is hash(key) % partitionNum.
            // 3. Alternatively, a custom Partitioner can be configured on the producer.
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, order.getOrderId().toString(), JSON.toJSONString(order));

            if (isAsync) {
                // Async send: the callback fires on the producer I/O thread.
                producer.send(producerRecord, new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata recordMetadata, Exception exception) {
                        try {
                            if (exception != null) {
                                // BUGFIX: was "+ exception.getStackTrace()", which prints
                                // an array reference, not the trace.
                                System.err.println("发送消息失败：" + exception.getMessage());
                                exception.printStackTrace();
                            }
                            if (recordMetadata != null) {
                                System.out.println("异步方式发送消息结果：" + "topic-" + recordMetadata.topic() + "|partition-"
                                        + recordMetadata.partition() + "|offset-" + recordMetadata.offset());
                            }
                        } finally {
                            // In finally so the latch is released even if logging throws.
                            countDownLatch.countDown();
                        }
                    }
                });

                //TODO implement business logic

            } else {
                // Sync send: block until the broker acknowledges.
                try {
                    RecordMetadata recordMetadata = producer.send(producerRecord).get();
                    System.out.println("同步方式发送消息结果：" + "topic-" + recordMetadata.topic() + "|partition-"
                            + recordMetadata.partition() + "|offset-" + recordMetadata.offset());
                } catch (InterruptedException e) {
                    // Restore the interrupt flag rather than swallowing it.
                    Thread.currentThread().interrupt();
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                } finally {
                    // BUGFIX: the sync path never counted down, forcing the
                    // await() below to sit out its full 5-second timeout.
                    countDownLatch.countDown();
                }
            }
        }

        try {
            // Bounded wait for all sends (async callbacks included) to finish.
            countDownLatch.await(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        } finally {
            // Always release the producer's network resources and flush buffers.
            producer.close();
        }

    }

    public static void main(String[] args) {
        String topic = "my-replicated-topic";
        new MyKafkaProducer(topic, false).start();
    }
}
