package cn.itcast.flink.base;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.util.Properties;
import java.util.Random;
import java.util.Random;

/**
 * Author itcast
 * Date 2021/7/31 11:11
 * Desc End-to-end exactly-once demo: reads lines from a Kafka topic, does a
 *      checkpointed word count (with a randomly injected failure to exercise
 *      restart/recovery), and writes results back to Kafka transactionally.
 */
public class Flink_Kafka_End2End {
    public static void main(String[] args) throws Exception {
        //1.创建流环境
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        //2.保证1秒钟提交一次checkpoint，用于将当前的source、transformation、sink的中间状态保存起来
        env.enableCheckpointing(1000);
        //3.将checkpoint存储到磁盘，开发存储到本地 file:///D:/ckp
        env.setStateBackend(new FsStateBackend("file:///D:/chk"));
        //4.设置两个checkpoint 之间的最短时间为500毫秒
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        //5.设置容忍最多checkpoint失败的次数为10次，默认是0，表示不容忍检查点失败
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(10);
        //6.当作业被取消时，保留外部的检查点  RETAIN_ON_CANCELLATION
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.
                RETAIN_ON_CANCELLATION);
        //7.设置检查点的默认模式为 Exactly_once
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        //8.设置检查点的超时时间为10分钟
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        //9.设置默认同一个时间有多少个检查点，设置一个
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        //10.设置重启策略，用于程序报错，能够重启程序恢复程序，固定恢复3次
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000));
        //11.以下为连接kafka source 数据源
        //12.配置 kafka 的属性 ，参考之前的kafka的属性
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092,node2:9092,node3:9092");
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "2000");
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "flinkGroup");
        props.setProperty("flink.partition-discovery.interval-millis", "5000");
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        //13.初始化 FlinkKafkaConsumer
        FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<>(
                "flink_kafka",
                new SimpleStringSchema(),
                props
        );
        //14.配置 kafka数据源 从最新的数据读取数据
        consumer.setStartFromLatest();
        //15.（重要）设置提交偏移量到每次保存checkpoint
        consumer.setCommitOffsetsOnCheckpoints(true);
        //16.添加 kafka source
        DataStreamSource<String> source = env.addSource(consumer);
        //17.Transformation 用空格切割每个单词转换成 Tuple2<String,Integer>，转换的过程中可以生成个bug
        SingleOutputStreamOperator<Tuple2<String, Long>> flatMapStream = source.flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
            Random rn = new Random();

            @Override
            public void flatMap(String value, Collector<Tuple2<String, Long>> out) throws Exception {
                String[] words = value.split(" ");
                for (String word : words) {
                    out.collect(Tuple2.of(word, 1L));
                    int i = rn.nextInt(5);
                    if (i > 3) {
                        System.out.println("出 bug 了...");
                        throw new RuntimeException("出 bug 了...");
                    }
                }
            }
        });
        //用于查看是否会重启并恢复 offset
        //18.分组和聚合
        SingleOutputStreamOperator<String> result = flatMapStream.keyBy(t -> t.f0)
                .sum(1)
                .map(t -> t.f0 + ":::" + t.f1);
        //19.将得到的 word和单词数 map 转换拼接成一个String
        //20.sink落地，初始化 FlinkKafkaProducer，并支持 Exactly_once 语义
        Properties props_sink = new Properties();
        props_sink.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092,node2:9092,node3:9092");
        props_sink.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 1000 * 5 + "");
        FlinkKafkaProducer<String> producer = new FlinkKafkaProducer<String>(
                "flink_kafka",
                new SimpleStringSchema(),
                props_sink
        );
        //21.将结果落地
        result.addSink(producer);
        //22.执行环境
        env.execute();
    }
}
