package edu.nepu.flink.api.sink;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

/**
 * @Date 2024/2/29 10:02
 * @Created by chenshuaijun
 */
/**
 * Demo job: reads lines from a socket and writes them to Kafka with
 * end-to-end exactly-once (transactional) delivery.
 */
public class SinkToKafkaSerializa {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Text source: one record per line read from the socket.
        final DataStreamSource<String> lines = env.socketTextStream("hadoop102", 9999);

        /*
         * When the downstream system is Kafka and data consistency must be
         * guaranteed, Kafka transactions are required. Using transactions
         * needs all of the following:
         *   (1) checkpointing enabled with EXACTLY_ONCE semantics;
         *   (2) the Kafka sink's delivery guarantee set to exactly-once;
         *   (3) a transactional-id prefix configured;
         *   (4) a transaction timeout >= max checkpoint duration + max restart
         *       duration — but it must not exceed the broker-side transaction
         *       timeout cap, which defaults to 15 minutes.
         */
        env.enableCheckpointing(2000, CheckpointingMode.EXACTLY_ONCE);
        env.setRestartStrategy(RestartStrategies.noRestart());

        // 10 minutes in milliseconds — safely below the broker's 15-minute cap.
        final String transactionTimeoutMs = String.valueOf(10 * 60 * 1000);

        final KafkaSink<String> sink = KafkaSink.<String>builder()
                .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
                .setRecordSerializer(new KafkaRecordSerializationSchema<String>() {
                    @Nullable
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String element, KafkaSinkContext context, Long timestamp) {
                        // Short random key per record; the line itself is the value.
                        byte[] keyBytes = UUID.randomUUID().toString().substring(0, 4)
                                .getBytes(StandardCharsets.UTF_8);
                        byte[] valueBytes = element.getBytes(StandardCharsets.UTF_8);
                        return new ProducerRecord<>("sink-kafka", keyBytes, valueBytes);
                    }
                })
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                .setTransactionalIdPrefix("nepu-")
                .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, transactionTimeoutMs)
                .build();

        lines.sinkTo(sink);

        env.execute();
    }
}
