package org.example.sink;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.Charset;

/**
 * @Author: tang
 * @Description:
 * @Date 2025/2/19 09:23
 */
public class KafkaSink {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();

        environment.enableCheckpointing(2000, CheckpointingMode.EXACTLY_ONCE);

        DataStreamSource<String> streamSource = environment.socketTextStream("127.0.0.1", 7777);

//        streamSource.print();

        org.apache.flink.connector.kafka.sink.KafkaSink<String> kafkaSink = org.apache.flink.connector.kafka.sink.KafkaSink
                .<String>builder()
                .setBootstrapServers("127.0.0.1:9092")
//                .setRecordSerializer(
//                        KafkaRecordSerializationSchema.<String>builder()
//                                .setTopic("topic_test")
//                                .setValueSerializationSchema(new SimpleStringSchema())
//                                .build()
//                )
                // 自定义序列化器
                .setRecordSerializer(new KafkaRecordSerializationSchema<String>() {
                    @Nullable
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String element, KafkaSinkContext context, Long timestamp) {
                        String[] splitArr = element.split(",");
                        return new ProducerRecord<>("topic_test",
                                splitArr[0].getBytes(Charset.defaultCharset()),
                                splitArr[1].getBytes(Charset.defaultCharset())
                        );
                    }
                })
                // 精准一次，必须设置下面的参数
                .setTransactionalIdPrefix("flink-kafka-sink")
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // 大于checkpoint间隔，小于 max 15分钟
                .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(10 * 60 * 1000))
                .build();

        streamSource.sinkTo(kafkaSink);

        environment.execute();

    }

}
