package com.arch.flink.state;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.restartstrategy.RestartStrategies.RestartStrategyConfiguration;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Timestamp;
import java.util.concurrent.TimeUnit;

/**
 * Demonstrates the default restart strategy Flink applies once checkpointing is
 * enabled: without checkpointing the job uses "no restart"; after
 * {@code env.enableCheckpointing(...)} it defaults to the fixed-delay strategy.
 *
 * Steps:
 *   1. Run the program directly. Because checkpointing is enabled, the job is
 *      restarted with the fixed-delay strategy each time the map task throws.
 *
 * @author pizhihui
 * @date 2024-05-28 23:09
 */
public class D4_EnableCheckpointRestart {

    private static final Logger log = LoggerFactory.getLogger(D4_EnableCheckpointRestart.class);

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Alternative: set an explicit failure-rate restart strategy instead of
        // relying on the checkpointing-enabled default:
        // RestartStrategyConfiguration strategy = RestartStrategies
        //         .failureRateRestart(5, Time.of(5, TimeUnit.MINUTES), Time.of(2, TimeUnit.SECONDS));
        // env.setRestartStrategy(strategy);

        /*
         * Observed log output when no state backend is configured explicitly:
         *   StateBackendLoader - No state backend has been configured, using default (HashMap) HashMapStateBackend
         *   StateBackendLoader - State backend loader loads the state backend as HashMapStateBackend
         *   CheckpointStorageLoader - Checkpoint storage is set to 'jobmanager'
         */
        env.enableCheckpointing(1000);
        // Default checkpointing mode: EXACTLY_ONCE
        System.out.println(env.getCheckpointConfig().getCheckpointingMode());

        DataStreamSource<Tuple3<String, Integer, Long>> source = env.addSource(new SourceFunction<Tuple3<String, Integer, Long>>() {

            // Fix: the original looped on `while (true)` with an empty cancel(),
            // so the source could never be stopped cooperatively. The SourceFunction
            // contract requires cancel() to make run() return; use a volatile flag.
            private volatile boolean running = true;

            @Override
            public void run(SourceContext<Tuple3<String, Integer, Long>> ctx) throws Exception {
                int index = 1;
                while (running) {
                    // Emit one record per second: (key, monotonically increasing counter, event time).
                    ctx.collect(Tuple3.of("key", index++, System.currentTimeMillis()));
                    Thread.sleep(1000);
                }
            }

            @Override
            public void cancel() {
                running = false;
            }
        });

        source.map(new MapFunction<Tuple3<String, Integer, Long>, Tuple3<String, Integer, String>>() {
            @Override
            public Tuple3<String, Integer, String> map(Tuple3<String, Integer, Long> event) throws Exception {
                // Fail deterministically on every 10th record to trigger the restart strategy.
                if (event.f1 % 10 == 0) {
                    String msg = String.format("Bad data [%d] ...", event.f1);
                    log.error(msg);
                    throw new RuntimeException(msg);
                }
                // Replace the epoch-millis field with a human-readable timestamp string.
                return Tuple3.of(event.f0, event.f1, new Timestamp(System.currentTimeMillis()).toString());
            }
        }).print();

        env.execute("no restart job");
    }
}
