package com.atguigu.Flink.checkpoint;

import com.alibaba.fastjson.JSON;
import com.atguigu.Flink.POJO.Event;
import org.apache.commons.lang3.RandomUtils;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.time.Duration;

/**
 * End-to-end exactly-once pipeline: reads comma-separated events from Kafka topic "topicA",
 * parses them into {@code Event} POJOs, assigns bounded-out-of-orderness watermarks, and
 * writes them as JSON to topic "topicB" using a transactional Kafka sink.
 *
 * <p>Exactly-once requires: checkpointing enabled in EXACTLY_ONCE mode, a transactional-id
 * prefix on the sink, and downstream consumers reading with isolation level read_committed.
 */
public class Flink02_kafkaEOS {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Enable checkpointing (every 5s) in EXACTLY_ONCE mode — required for the
        // transactional (two-phase-commit) Kafka sink below.
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);

        KafkaSource<String> kafkaSource =
                KafkaSource.<String>builder()
                        .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
                        .setTopics("topicA")
                        .setGroupId("flink4")
                        //.setDeserializer()  // would set key AND value deserializers
                        .setValueOnlyDeserializer(new SimpleStringSchema())  // value-only deserializer
                        // Resume from committed offsets; fall back to LATEST when none exist.
                        .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST))
                        // Settings without a dedicated builder method go through setProperty.
                        //.setProperty("enable.auto.commit" , "true" )
                        .setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
                        //.setProperty("auto.commit.interval.ms" , "5000")
                        .setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "5000")
                        // The upstream topic may contain pre-committed (in-flight transactional)
                        // records, so only consume data from committed transactions.
                        .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
                        .build();

        SingleOutputStreamOperator<Event> ds = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka-source")
                .map(
                        line -> {
                            // Expected record layout: "user,url,timestamp" — TODO confirm against producer.
                            String[] fields = line.split(",");
                            // parseLong avoids the needless boxing of Long.valueOf
                            return new Event(fields[0].trim(), fields[1].trim(), Long.parseLong(fields[2].trim()));
                        }
                ).assignTimestampsAndWatermarks(
                        // Tolerate up to 2 seconds of out-of-order events.
                        WatermarkStrategy.<Event>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                                .withTimestampAssigner(
                                        (event, ts) -> event.getTs()
                                )
                );

        // Kafka sink (transactional)
        KafkaSink<String> kafkaSink =
                KafkaSink.<String>builder()
                        .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
                        // Records are written without a key.
                        .setRecordSerializer(
                                KafkaRecordSerializationSchema.<String>builder()
                                        .setValueSerializationSchema(new SimpleStringSchema())
                                        .setTopic("topicB")
                                        .build()
                        )
                        // Delivery guarantee: EXACTLY_ONCE requires checkpointing (enabled above)
                        // and a transactional-id prefix.
                        .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                        // The prefix MUST be stable across job restarts (and unique per
                        // application on this cluster): after a failure Flink uses it to locate
                        // and abort/commit lingering transactions. A random prefix would leave
                        // orphaned transactions that block read_committed consumers until the
                        // transaction timeout expires.
                        .setTransactionalIdPrefix("flink-kafkaEOS-topicB")
                        // Producer transaction timeout must be <= the broker's
                        // transaction.max.timeout.ms (default 15 min); 5 min here.
                        .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "300000")
                        .build();

        // Serialize each Event to JSON and write it transactionally to topicB.
        ds.map(
                JSON::toJSONString
        ).sinkTo(
                kafkaSink
        );

        try {
            env.execute();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
