package cn.itcast.flink.exactlyonce;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.Properties;
import java.util.Random;

import static org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase.KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS;

/**
 * Author itcast
 * Date 2022/1/17 10:10
 * Desc 从 Kafka 中读取数据并写回 Kafka，保证仅一次（exactly-once）语义
 */
public class FlinkKafkaWriter {
    public static void main(String[] args) throws Exception {
        System.setProperty("HADOOP_USER_NAME","root");
        //todo 获取流执行环境
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        //设置当前全局的并行度
        env.setParallelism(1);
        //todo 设置chk 1s 状态后端到hdfs或本地file，并行度
        //开启 checkpoint ，按照 1s 将全局的状态保存到 checkpoint 中
        env.enableCheckpointing(1000);
        //先获取读写 checkpoint 配置
        CheckpointConfig conf = env.getCheckpointConfig();
        //设置 hdfs ，将 全局的checkpoint保存到 hdfs 上
        conf.setCheckpointStorage("hdfs://node1:8020/flink-checkpoints/");
        //todo 设置chk属性配置，仅一次模式、超时、并行、容忍、最小间隔、取消任务保存chk
        //设置 checkpoint 的超时时间，超过1分钟
        conf.setCheckpointTimeout(60 * 1000);
        //设置当前checkpoint的执行模式是 仅一次语义
        conf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        //设置checkpoint并行数为 1
        conf.setMaxConcurrentCheckpoints(1);
        //设置容忍失败的checkpoint的次数，如果超过这个次数，程序job会跟着checkpoint失败
        conf.setTolerableCheckpointFailureNumber(10);
        //设置两个 checkpoint 之间的最短间隔时间
        conf.setMinPauseBetweenCheckpoints(500);
        //设置 checkpoint 在job取消的时候，是否删除checkpoint，在HDFS上存储 /flink-checkpoints/ metastore 元数据
        conf.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //todo 设置重启策略 3次，10s间隔
        //设置重启策略，固定重启策略，重启 3次， 每次之间间隔 5秒
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 5000));
        //todo 配置kafka consumer 属性：服务器、消费组、重置从最新、自动发现分区
        Properties props = new Properties();
        //设置 flink 连接 kafka集群的服务器地址
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092,node2:9092,node3:9092");
        //设置 flink 消费 kafka数据的消费者组，每个消费者组内，消费 1 份数据
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "_flink_kafka_consumer_");
        //设置flink 消费的offset提交给 kafka broker
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true + "");
        //设置flink自动提交的时间间隔
        props.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 10 * 1000 + "");
        //自动分区发现
        props.setProperty(KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS, 10 * 60 * 1000 + "");

        //创建flink消费kafka的实例
        FlinkKafkaConsumer consumer = new FlinkKafkaConsumer(
                "flinkkafka"
                , new SimpleStringSchema()
                , props
        );
        //todo 设置consumer设置从最新的读取
        //提交 offset 提交到 checkpoint 去维护
        consumer.setCommitOffsetsOnCheckpoints(true);
        //设置如果 job 程序报错，从最新的offset的数据继续消费
        consumer.setStartFromLatest();

        //todo 添加kafka数据源
        DataStreamSource<String> source = env.addSource(consumer);
        //todo 切分单词并记1。遍历每个单词中，随机从0~4中给一个值，如果该值大于3就模拟异常bug，将[单词，1]收集
        SingleOutputStreamOperator<Tuple2<String, Integer>> flatMapStream = source.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            Random rm = new Random();

            @Override
            public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
                //将每行数据进行切分
                String[] words = value.split(" ");
                //遍历每个单词
                for (String word : words) {
                    //随机给数字
                    int random = rm.nextInt(5);
                    //如果当前的数字大于3，抛出一个异常
                    if (random > 3) {
                        throw new RuntimeException("1/0 bug");
                    }
                    //将 [word,1] 返回
                    out.collect(Tuple2.of(word, 1));
                }
            }
        });
        //todo 对数据流进行分组、聚合
        SingleOutputStreamOperator<String> result = flatMapStream.keyBy(t -> t.f0)
                .sum(1)
                //todo 对最终word和count进行map映射成 word:::count hello:::23
                .map(t -> t.f0 + ":::" + t.f1);

        //todo 设置写到kafka的属性 服务器和事务超时时间 5s
        //todo 创建 FlinkKafkaProducer
        FlinkKafkaProducer<String> producer = new FlinkKafkaProducer(
                "node1:9092,node2:9092,node3:9092",
                "flink2kafka",
                new SimpleStringSchema()
        );
        //todo 将 producer 添加到sink
        result.addSink(producer);
        //todo 执行流环境
        env.execute();
    }
}
