package com.intct.flink;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.producer.ProducerConfig;

/**
 * @author gufg
 * @since 2025-06-26 14:08
 */
/**
 * Demo Flink job: reads strings from one Kafka topic and writes them to another
 * with end-to-end exactly-once semantics.
 *
 * <p>Exactly-once requires BOTH of the following (per the KafkaSink docs):
 * <ul>
 *   <li>Flink checkpointing enabled in {@code CheckpointingMode.EXACTLY_ONCE}, and</li>
 *   <li>the sink configured with {@code DeliveryGuarantee.EXACTLY_ONCE} plus a
 *       transactional-id prefix that is unique per application, so transactions of
 *       different jobs do not interfere with each other.</li>
 * </ul>
 * With EXACTLY_ONCE the sink writes through Kafka transactions committed on checkpoint,
 * so downstream consumers must read with {@code isolation.level=read_committed}, and data
 * only becomes visible when a checkpoint completes — tune the checkpoint interval
 * accordingly. The Kafka transaction timeout must be well above
 * (max checkpoint interval + max restart duration), otherwise Kafka aborts in-flight
 * transactions and data is lost.
 */
public class KafkaSinkDemo {
    public static void main(String[] args) throws Exception {

        // Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Single parallel instance for the demo.
        env.setParallelism(1);

        // Checkpoint every 3 seconds in EXACTLY_ONCE mode.
        // NOTE: this must be EXACTLY_ONCE (not AT_LEAST_ONCE) to match the sink's
        // DeliveryGuarantee.EXACTLY_ONCE below — otherwise replayed records after a
        // failure break the exactly-once guarantee end to end.
        env.enableCheckpointing(3000, CheckpointingMode.EXACTLY_ONCE);

        // Allow at most 3 failures within 30 days, with a 3-second delay between restarts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30), Time.seconds(3)));

        // Source: consume string values from the input topic, starting at the latest offsets.
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                .setBootstrapServers("jd-node:9092")
                .setGroupId("kafka-source-demo")
                .setTopics("my-source-topic")
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .setStartingOffsets(OffsetsInitializer.latest())
                .build();

        DataStreamSource<String> kafkaDS = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka_source_demo");

        // kafkaDS.print();

        // Sink: produce records transactionally to the output topic.
        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                .setBootstrapServers("jd-node:9092")
                .setRecordSerializer(KafkaRecordSerializationSchema.<String>builder()
                        .setTopic("my-topic")
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build()
                )
                // EXACTLY_ONCE requires checkpointing (enabled above) and a unique
                // transactional-id prefix per application.
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                .setTransactionalIdPrefix("kafka_sink_demo")
                // 60 s transaction timeout: well above the 3 s checkpoint interval plus the
                // 3 s restart delay, and below Kafka's broker-side default
                // transaction.max.timeout.ms (15 min), so the broker accepts it.
                .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "60000")
                .build();

        kafkaDS.sinkTo(kafkaSink);

        env.execute();
    }
}
