package org.example.sink;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchemaBuilder;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;

/**
 * Demo Flink job: reads lines from a local socket and writes them to the
 * Kafka topic "test" with exactly-once delivery semantics.
 *
 * <p>Exactly-once requires three things, all configured below:
 * checkpointing in EXACTLY_ONCE mode, {@link DeliveryGuarantee#EXACTLY_ONCE}
 * on the sink (enables Kafka's two-phase-commit transactions), and a
 * transactional id prefix plus a transaction timeout that is larger than the
 * checkpoint interval and below the broker's maximum (default 15 minutes).
 */
public class SinkKafkaDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStreamSource<String> source = env.socketTextStream("localhost", 9999);
        // Exactly-once delivery requires checkpointing to be enabled.
        env.enableCheckpointing(1000, CheckpointingMode.EXACTLY_ONCE);
        // Build the Kafka sink.
        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                // Kafka broker address(es).
                .setBootstrapServers("localhost:9092")
                // Custom serializer: splits each line on "," and uses the first
                // field as the record key, the whole line as the value.
                // NOTE(review): assumes input lines are comma-separated with the
                // key in the first field — confirm against the upstream format.
                // (For the simple case, KafkaRecordSerializationSchema.builder()
                // with setTopic/setValueSerializationSchema would suffice.)
                .setRecordSerializer(new KafkaRecordSerializationSchema<String>() {
                    @Nullable
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String element, KafkaSinkContext context, Long timestamp) {
                        // String.split always yields at least one element,
                        // so fields[0] is safe even for lines without a comma.
                        String[] fields = element.split(",");
                        byte[] key = fields[0].getBytes(StandardCharsets.UTF_8);
                        byte[] value = element.getBytes(StandardCharsets.UTF_8);
                        return new ProducerRecord<>("test", key, value);
                    }
                })
                // Exactly-once guarantee: enables Kafka two-phase-commit transactions.
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // Exactly-once requires a transactional id prefix.
                .setTransactionalIdPrefix("json-")
                // Transaction timeout must exceed the checkpoint interval and stay
                // below the broker's transaction.max.timeout.ms (default 15 min).
                .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "30000")
                .build();
        source.sinkTo(kafkaSink);

        env.execute("KafkaDemo");
    }
}
