package com.wuwangfu.exactlyonce;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

import java.util.Properties;

/**
 * @Description: Exactly-once delivery with FlinkKafkaProducer.
 * @Author: jcshen
 * @Date: 2023-07-02
 *
 * Reads data from Kafka, processes it, and writes the results back to Kafka.
 * Requirement: guarantee data consistency (exactly-once).
 *
 * Exactly-once needs a replayable source (the Kafka source records offsets and,
 * on failure, does not advance them) and a transactional sink.
 * With checkpointing enabled, the source offsets are stored in operator state
 * and the in-flight results are committed atomically with each checkpoint.
 */
public class KafkafToKafka {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Enable checkpointing every 30s in EXACTLY_ONCE mode so that source
        // offsets (operator state) and sink transactions commit atomically.
        env.enableCheckpointing(30000, CheckpointingMode.EXACTLY_ONCE);
        env.setStateBackend(new FsStateBackend("hdfs://node-01:9000/flink/checkpoint"));

        /* Kafka-related properties */
        Properties proper = new Properties();
        // Kafka broker address and port
        proper.setProperty("bootstrap.servers","node-01:9092");
        // Offset reset policy: read from the beginning if no offset has been
        // recorded; otherwise resume from the recorded offset.
        proper.setProperty("auto.offset.reset","earliest");
        // Consumer group id
        proper.setProperty("group.id","g20230702");
        // Disable Kafka's periodic auto-commit: with checkpointing enabled the
        // authoritative offsets live in Flink's checkpointed state, which is
        // what makes replay after failure possible.
        proper.setProperty("enable.auto.commit","false");

        // Create the FlinkKafkaConsumer for the input topic.
        FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(
                "wc",                     // input topic name
                new SimpleStringSchema(), // deserialization schema
                proper
        );
        // On checkpoint, do not write offsets back to Kafka's internal
        // __consumer_offsets topic; Flink's checkpoint state is the source of truth.
        kafkaConsumer.setCommitOffsetsOnCheckpoints(false);
        // Add the Kafka consumer as the job's source.
        DataStreamSource<String> line = env.addSource(kafkaConsumer);

        // Drop records that are flagged as errors before writing back to Kafka.
        SingleOutputStreamOperator<String> filtered = line.filter(e -> !e.startsWith("error"));

        // The default Semantic.AT_LEAST_ONCE cannot provide exactly-once delivery:
//        FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<>( "node-01:9092","out", new SimpleStringSchema());

        /* Write to the output Kafka topic with EXACTLY_ONCE (transactional) semantics. */
        // Transaction timeout: must be larger than the checkpoint interval and
        // below the broker's transaction.max.timeout.ms (15 minutes by default).
        proper.setProperty("transaction.timeout.ms",1000 * 60 * 5 + "");
        // BUG FIX: the default topic previously read "elt" while the custom
        // serialization schema targets "etl" — the two must agree, otherwise
        // records are routed to a different topic than intended.
        FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<>(
                "etl",                                     // default output topic name
                new KafkaStringSerializationSchema("etl"), // custom serialization schema (sets the per-record topic)
                proper,                                    // Kafka producer properties
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE   // exactly-once via Kafka transactions
        );

        /* Attach the transactional Kafka sink. */
        filtered.addSink(kafkaProducer);

        env.execute();
    }
}
