package com.atguigu.flink.state;

import org.apache.commons.lang3.RandomUtils;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.ProducerConfig;

/**
 * Created by 黄凯 on 2023/6/25 0025 16:36
 *
 * @author 黄凯
 * 永远相信美好的事情总会发生.
 * <p>
 * kafka -> flink -> kafka 端到端精确一次编码
 * *
 * * KafkaSource:
 * *   注意: 预提交的数据不能读取， 只能读取到真正commit的数据
 * *      隔离级别:
 * *         读未提交
 * *         读已提交
 * *         可重复读
 * *         序列化
 * *     Kafka隔离级别   isolation.level
 * *         [read_committed, read_uncommitted]
 * *
 * *
 * * KafkaSink:
 * *    注意:
 * *       .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
 * *       .setTransactionalIdPrefix(...)  // 必须设置; 前缀需在重启后保持稳定且全局唯一, 随机前缀会妨碍事务恢复
 * *       .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG , "600000")
 */
public class Flink03_kafkaToFlinkToKafkaEOS {

    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // EXACTLY_ONCE checkpointing is a prerequisite for end-to-end exactly-once:
        // the KafkaSink only commits its Kafka transactions when a checkpoint completes.
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);

        // KafkaSource: read only committed records so uncommitted (pre-committed)
        // transactional data from upstream producers is never observed.
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                .setBootstrapServers("hadoop102:9092,hadoop103:9092")
                .setGroupId("flink1")
                .setTopics("topicA")
                .setValueOnlyDeserializer(new SimpleStringSchema()) // 仅针对于没有key的消息
                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
                // 如果其他的设置没有对应的方法, 统一使用.setProperty(配置项, 配置值)
                // isolation.level=read_committed: skip records from aborted/open transactions.
                .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
                .build();

        DataStreamSource<String> ds = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafkaSource");

        // KafkaSink: 将流中的数据写入到Kafka中
        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                .setBootstrapServers("hadoop102:9092,hadoop103:9092")
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.<String>builder()
                                .setTopic("topicA")
                                .setValueSerializationSchema(new SimpleStringSchema())
                                .build()
                )

                // EOS(Exactly Once semantic): 精确一次
                // AT_LEAST_ONCE: 至少一次
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)

                // 精确一次时, KafkaSink要求必须设置事务ID的前缀.
                // BUG FIX: the prefix must be STABLE across restarts (and unique per
                // application) so that transactions left open by a failed run can be
                // recovered/aborted on restore; a random prefix (e.g.
                // "flink" + RandomUtils.nextInt(1, 100)) changes on every start and
                // breaks transaction recovery, defeating exactly-once.
                .setTransactionalIdPrefix("flink-kafka-to-kafka-eos")

                // kafkaSink默认的生产者事务的超时时间为: 1 hour
                // KafkaBroker默认允许的事务的最大超时时间: 15 minutes
                // 要求生产者事务的超时时间不能超过KafkaBroker允许的事务最大超时时间
                .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "600000")
                // 如果还有其他的配置, 可以直接使用setProperty
                //.setProperty(ProducerConfig.ACKS_CONFIG, "-1")
                .build();

        // BUG FIX: the sink was built but never attached to the stream, so the job
        // consumed from topicA and silently dropped everything. Wire it up here.
        ds.sinkTo(kafkaSink);

        try {
            env.execute();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

    }

}
