package com.cloudsubassemblykafka10024.tranactions;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.UUID;

/**
 * Demonstrates Kafka transactional production: sends 100 records to topic
 * {@code messageDML005} atomically — all are visible to read_committed
 * consumers on commit, none on abort.
 *
 * @author zx
 * @version 1.0
 * @since 2024/12/24
 */
public class KafkaProducerTransactions_001 {

    public static void main(String[] args) {
        // try-with-resources closes the producer exactly once, even if
        // commit/abort throws (previously close() was invoked twice:
        // in finally and again after the try block).
        try (KafkaProducer<String, String> producer = buildKafkaProducer()) {

            // Register the transactional.id with the broker and fence any
            // older producer instance; required before beginTransaction().
            producer.initTransactions();
            try {
                // Start the transaction.
                producer.beginTransaction();

                for (int i = 0; i < 100; i++) {
                    ProducerRecord<String, String> producerRecord =
                            new ProducerRecord<>("messageDML005", "Test" + i, "ACK测试" + i);
                    producer.send(producerRecord);
                }
                // Flush once after the loop — flushing per record defeated the
                // batch.size/linger.ms batching configured below.
                producer.flush();

                // Commit: atomically makes the whole batch visible.
                producer.commitTransaction();

            } catch (Exception e) {
                System.out.println("事务提交失败" + e.getMessage());
                // Preserve the full stack trace instead of only getMessage().
                e.printStackTrace();
                // Abort so the partial batch is discarded by
                // read_committed consumers.
                producer.abortTransaction();
            }
        }
    }

    /**
     * Builds a transactional, idempotent {@link KafkaProducer} with String
     * key/value serializers.
     *
     * <p>NOTE(review): the transactional.id is randomized per run, so broker-side
     * fencing of zombie producers across restarts will not apply — confirm this
     * is intended for a demo; production code should use a stable id.
     *
     * @return a configured producer; caller is responsible for closing it
     */
    public static KafkaProducer<String, String> buildKafkaProducer() {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.18.220:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Transactional id — mandatory for initTransactions()/beginTransaction().
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transaction-id-" + UUID.randomUUID());

        // Batch size in bytes.
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 1024);
        // Linger 5 ms: send a partially filled batch after this delay.
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 5);

        // Idempotence + acks=all are required for transactional delivery
        // guarantees (exactly-once within the producer session).
        properties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 20000);

        return new KafkaProducer<>(properties);
    }
}
