package org.bigwinner.flinkLearning;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.bigwinner.flinkLearning.Constants.ConstantsValue;
import org.bigwinner.flinkLearning.Functions.String2MysqlMessagePF;
import org.bigwinner.flinkLearning.Message.MysqlMessage;
import org.bigwinner.flinkLearning.Sinks.MySqlTwoPhaseCommitSink;

import java.util.Properties;

/**
 * @author: IT大狮兄
 * @date: 2021/7/30 下午6:17
 * @version: 1.0.0
 * @description: Two-phase commit demo (Kafka source -> exactly-once MySQL sink)
 */
public class TwoPhaseCommitDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);
        environment.setParallelism(1);

        // State backend: keep checkpoint state on HDFS.
        // NOTE(review): FsStateBackend is deprecated in newer Flink versions in favor of
        // HashMapStateBackend + setCheckpointStorage — confirm against the Flink version in use.
        environment.setStateBackend(new FsStateBackend("hdfs://lsl001:8020/checkpoint/flink-checkpoint"));

        // Checkpointing: trigger a checkpoint every 30s with exactly-once semantics.
        // (The original comment claimed 60s, but the configured interval is 30000 ms.)
        environment.enableCheckpointing(30000, CheckpointingMode.EXACTLY_ONCE);
        // Tolerate at most 3 checkpoint failures before failing the job.
        environment.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);
        // A checkpoint must complete within 30s or it is discarded.
        // (The original comment claimed 10s, but the configured timeout is 30000 ms.)
        environment.getCheckpointConfig().setCheckpointTimeout(30000);
        // Allow only one checkpoint to be in flight at a time.
        environment.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Retain externalized checkpoint data after the job is cancelled, so the job
        // can later be restored from a chosen checkpoint.
        environment.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Restart strategy: at most 4 restart attempts, 10 seconds apart.
        environment.setRestartStrategy(RestartStrategies.fixedDelayRestart(4, Time.seconds(10)));

        // Kafka source configuration. Auto-commit is disabled because offsets must be
        // committed as part of Flink's checkpoint for exactly-once processing.
        Properties properties = new Properties();
        // Use setProperty consistently: unlike raw put(), it is String-typed and keeps
        // the Properties contract (getProperty only sees String values).
        properties.setProperty("enable.auto.commit", "false");
        properties.setProperty("bootstrap.servers", ConstantsValue.KAFKA_BOOTSTRAP_SERVERS);
        properties.setProperty("group.id", ConstantsValue.KAFKA_GROUP_ID);
        properties.setProperty("flink.poll-timeout", "120000");
        properties.setProperty("default.api.timeout.ms", "120000");

        FlinkKafkaConsumer<String> kafkaDS = new FlinkKafkaConsumer<>(ConstantsValue.KAFKA_CONSUMER_TOPIC,
                new SimpleStringSchema(), properties);

        // Pipeline: Kafka JSON string -> MysqlMessage -> two-phase-commit MySQL sink.
        DataStreamSource<String> dataStreamSource = environment.addSource(kafkaDS);
        SingleOutputStreamOperator<MysqlMessage> outputStreamOperator = dataStreamSource
                .process(new String2MysqlMessagePF<String, MysqlMessage>())
                .name("json2message");
        outputStreamOperator.addSink(new MySqlTwoPhaseCommitSink()).name("mysql_sink");
        environment.execute("two_phase_commit_mysql_demo");
    }
}
