package com.atguigu.sink;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchemaBuilder;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;

import java.nio.charset.StandardCharsets;

/**
 * @author yhm
 * @create 2024-04-03 14:27
 */
public class Test03_KafkaSinkKey {
    public static void main(String[] args) throws Exception {
        // 1. Create the execution environment (embedded web UI pinned to port 8081).
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 8081);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);

        // The Kafka sink can only provide exactly-once delivery when checkpointing
        // is enabled (the sink commits its Kafka transactions on checkpoint).
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);

        // 2. Read the source: one String record per line from the socket.
        //    Records are expected to be comma-separated, with the key in the first field.
        DataStreamSource<String> streamSource = env.socketTextStream("hadoop102", 7777);

        // 3./4. Build the Kafka sink and attach it with sinkTo().
        //       (Connector-style sinks use sinkTo(sink); legacy SinkFunctions use addSink().)
        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                // Core settings live on the builder.
                .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092") // broker list
                .setRecordSerializer(new KafkaRecordSerializationSchema<String>() {
                    @Nullable
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String element, KafkaSinkContext context, Long timestamp) {
                        // Key each record by its first comma-separated field so that
                        // records sharing a key go to the same partition, preserving
                        // per-key ordering. split(",") always yields at least one
                        // element, so fields[0] is safe even without a comma.
                        String[] fields = element.split(",");
                        // Always pass an explicit charset: the no-arg getBytes() uses
                        // the platform default and can differ between client and consumer.
                        return new ProducerRecord<>("ws",
                                fields[0].getBytes(StandardCharsets.UTF_8),
                                element.getBytes(StandardCharsets.UTF_8));
                    }
                })
                // Exactly-once requires all three of:
                //  (1) a transactional-id prefix,
                //  (2) a transaction timeout with: checkpoint interval < timeout <= 15 min
                //      (15 min is the broker's default transaction.max.timeout.ms cap),
                //  (3) checkpointing enabled (done above).
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                .setTransactionalIdPrefix("atguigu-")
                // 10 minutes: comfortably above the 5 s checkpoint interval and below the
                // broker's 15-minute cap. The original value (15 * 1000 = 15 s) violated the
                // rule stated in the comment above — any checkpoint taking longer than 15 s
                // would have its transaction aborted by the broker, failing the job.
                .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(10 * 60 * 1000))
                .build();
        streamSource.sinkTo(kafkaSink);

        // 5. Submit and run the job.
        env.execute();
    }
}
