package com.atguigu.flink.state;

import org.apache.commons.lang3.RandomUtils;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.ProducerConfig;

/**
 * Created by Smexy on 2023/4/12
 *
 *      Job1 -----> topicA <------ Job2 ------> topicB
 *
 *      How Job2 achieves end-to-end exactly-once:
 *          1. Source: must be able to rewind offsets (KafkaSource).
 *                 The consumer isolation level must NOT be READ_UNCOMMITTED, otherwise the
 *                 job would read records belonging to aborted or still-open transactions.
 *          2. Checkpointing must be enabled with EXACTLY_ONCE semantics.
 *          3. Sink: must support two-phase commit (KafkaSink).
 *
 *                 Transaction-timeout note: the client-side transaction timeout must not
 *                 exceed the broker's maximum (transaction.max.timeout.ms, 15 min by default),
 *                 or the producer fails with
 *                 "The transaction timeout is larger than the maximum value allowed by the broker".
 *                 Flink's default producer transaction timeout exceeds 15 min, so it is
 *                 lowered explicitly below.
 */
public class Demo14_KafkaEOS
{
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.setParallelism(2);

        // 2. Enable checkpointing (every 5s) with EXACTLY_ONCE semantics.
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);

        // 1. Source that can rewind offsets; read_committed hides records from
        //    uncommitted/aborted upstream transactions.
        //    NOTE: do NOT set enable.auto.commit=true here. With checkpointing enabled,
        //    KafkaSource commits offsets as part of each successful checkpoint;
        //    auto-committing ahead of checkpoints can cause unprocessed records to be
        //    skipped after a restore from committedOffsets, breaking exactly-once.
        KafkaSource<String> kafkaSource = KafkaSource
            .<String>builder()
            .setBootstrapServers("hadoop102:9092")
            .setGroupId("atguigutest")
            .setTopics("topicC")
            .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
            .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
            .setValueOnlyDeserializer(new SimpleStringSchema())
            .build();

        // 3. Two-phase-commit sink.
        KafkaSink<String> kafkaSink = KafkaSink
            .<String>builder()
            .setBootstrapServers("hadoop102:9092")
            .setRecordSerializer(
                KafkaRecordSerializationSchema.builder()
                                              .setTopic("topicA") // target topic
                                              .setValueSerializationSchema(new SimpleStringSchema())
                                              .build()
            )
            // Must stay below the broker's transaction.max.timeout.ms (15 min default): use 5 min.
            .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(5 * 60 * 1000))
            .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
            // Required for EXACTLY_ONCE. The prefix must be unique per job but STABLE
            // across restarts of the same job: after a failure, Flink reuses the prefix
            // to find and abort the previous attempt's dangling transactions. A random
            // prefix (e.g. RandomUtils.nextInt per run) would leave orphaned transactions
            // that block read_committed consumers until the transaction times out.
            .setTransactionalIdPrefix("atguigu-demo14-kafka-eos")
            .build();

        // Wire the source into the environment.
        DataStreamSource<String> ds = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafkaSource");

        ds.sinkTo(kafkaSink);

        // Console side-sink that throws on records containing "s6" to simulate a
        // failure and exercise the transaction abort/recovery path.
        ds.addSink(new SinkFunction<String>()
           {
               @Override
               public void invoke(String value, Context context) throws Exception {
                   if (value.contains("s6")) {
                       throw new RuntimeException("出异常了....");
                   }
                   System.out.println(value);
               }
           });

        // Propagate failures instead of swallowing them with printStackTrace():
        // a failed job must exit non-zero so supervisors/operators can react.
        env.execute();
    }
}
