package com.atguigu.realtime.app;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

// NOTE(review): removed erroneous IDE auto-import of the JDK-internal class
// sun.misc.PostVMInitHook — it fails to compile on modern JDKs and is not needed:
// `run(tenv)` below resolves to this class's own abstract run(StreamTableEnvironment).
// import static sun.misc.PostVMInitHook.run;

/**
 * @ClassName: BaseTenvApp
 * @Description: Base class for Flink applications that process data with SQL:
 *               builds the execution environment (web-UI port, parallelism,
 *               HDFS-backed exactly-once checkpointing), then hands a
 *               {@code StreamTableEnvironment} to the subclass's {@code run} method.
 * @Author: kele
 * @Date: 2021/4/27 10:05
 **/
abstract public class BaseTenvApp {

    /**
     * Subclasses implement the actual SQL processing logic here.
     *
     * @param tenv the table environment prepared by {@link #init}
     */
    protected abstract void run(StreamTableEnvironment tenv);

    /**
     * Builds the streaming environment, configures checkpointing, creates a
     * {@code StreamTableEnvironment} and invokes {@link #run} on it.
     *
     * @param port          REST / web-UI port ({@code rest.port}) for the environment
     * @param parallelism   default operator parallelism
     * @param checkpointDir subdirectory under {@code hdfs://hadoop162:8020/gmall1026/}
     *                      where checkpoint state is stored
     */
    public void init(int port, int parallelism, String checkpointDir) {

        // Identity used when this job writes checkpoint files to HDFS
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);

        // BUG FIX: `conf` was previously built but never passed in, so the
        // custom rest.port was silently ignored. Pass it to the factory so the
        // web UI actually binds to the requested port.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        env.setParallelism(parallelism);

        // Checkpoint state backend: store snapshots under the given HDFS directory
        env.setStateBackend(new FsStateBackend("hdfs://hadoop162:8020/gmall1026/" + checkpointDir));

        // Exactly-once checkpointing semantics
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // A checkpoint taking longer than one minute is considered failed
        env.getCheckpointConfig().setCheckpointTimeout(60000);

        // Retain externalized checkpoints on cancellation so the job can be
        // restarted from the last checkpoint after a manual cancel
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Allow at most one checkpoint in flight at a time
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

        // Trigger a checkpoint every 5 seconds
        env.enableCheckpointing(5000);

        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        run(tenv);
    }
}
