package org.example.checkpoint;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;

import javax.annotation.Nullable;
import java.nio.charset.Charset;
import java.time.Duration;
import java.util.Collections;

/**
 * @Author: tang
 * @Description:
 * @Date 2025/2/28 16:26
 */
public class EosKafka {

    public static void main(String[] args) throws Exception{

        StreamExecutionEnvironment environment =
                StreamExecutionEnvironment.getExecutionEnvironment();

        environment.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        CheckpointConfig checkpointConfig = environment.getCheckpointConfig();

        checkpointConfig.setMaxConcurrentCheckpoints(1);
        checkpointConfig.setCheckpointStorage("file:///Users/tanglonglong/Downloads/flink");

        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                .setBootstrapServers("127.0.0.1:9092")
                .setTopics(Collections.singletonList("topic_test"))
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .setDeserializer(KafkaRecordDeserializationSchema.valueOnly(StringDeserializer.class))
                .setStartingOffsets(OffsetsInitializer.latest())
                .build();

        KafkaSink<String> kafkaSink1 = KafkaSink.<String>builder()                                                                 
                .setBootstrapServers("127.0.0.1:9092")
                // kafka 精准一次 开启 2pc
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // 事务
                .setTransactionalIdPrefix("flink-sink-kafka")
                // 事务超时时间  》 checkpoint间隔
                .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(10 * 60 * 1000))
                .setRecordSerializer(new KafkaRecordSerializationSchema<String>() {
                    @Nullable
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String value, KafkaSinkContext context, Long timestamp) {
                        return new ProducerRecord<>("ws",value.getBytes(Charset.defaultCharset()));
                    }
                }).build();

        environment.fromSource(
                kafkaSource,
                WatermarkStrategy.noWatermarks(),
                "kafka-source"
        ).sinkTo(kafkaSink1);

        environment.execute();

    }

}
