package com.lzwk.base;

import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Base template for Flink streaming applications: configures parallelism,
 * state backend, and checkpointing, then delegates business logic to {@link #run}.
 *
 * @author Yeaser
 * @since 2022/1/28
 * @version 1.0
 */
public abstract class BaseAppV1 {

    /**
     * Builds the execution environment, applies state-backend and checkpoint
     * configuration, invokes the subclass's {@link #run} to wire up the job
     * graph, and submits the job (blocking until it finishes).
     *
     * @param p       default parallelism for the job
     * @param ck      checkpoint subdirectory name appended to the HDFS checkpoint root
     * @param jobName job name shown in the Flink UI
     * @param flag    "test" selects the in-memory HashMap state backend with no
     *                checkpointing; any other value selects RocksDB + HDFS checkpoints
     * @throws RuntimeException if job submission or execution fails
     */
    public void init(int p, String ck, String jobName, String flag) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(p);

        if ("test".equals(flag)) {
            // Local/test mode: lightweight heap state, no checkpointing.
            env.setStateBackend(new HashMapStateBackend());
        } else {
            // Production mode: RocksDB state with durable checkpoints on HDFS.
            env.setStateBackend(new EmbeddedRocksDBStateBackend());
            CheckpointConfig ckConfig = env.getCheckpointConfig();
            ckConfig.setCheckpointStorage("hdfs://10.2.16.4:4007/flink_113/checkpoints/" + ck);
            env.enableCheckpointing(600_000L);                       // checkpoint every 10 min
            ckConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
            ckConfig.setCheckpointTimeout(100_000L);                 // abort a checkpoint after 100 s
            ckConfig.setMaxConcurrentCheckpoints(2);
            ckConfig.setMinPauseBetweenCheckpoints(60 * 1000 * 2);   // >= 2 min between checkpoints
            // Keep completed checkpoints when the job is cancelled so it can be restored.
            ckConfig.enableExternalizedCheckpoints(
                    CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        }

        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        // Each concrete app contributes its own business logic here.
        run(env, tenv);

        try {
            // execute() already blocks until the job terminates; the previous
            // `.wait()` on the result threw IllegalMonitorStateException
            // (wait() requires holding the object's monitor) and was removed.
            env.execute(jobName);
        } catch (Exception e) {
            // Propagate with the original cause instead of silently printing,
            // so a failed job does not appear to exit successfully.
            throw new RuntimeException("Flink job failed: " + jobName, e);
        }
    }

    /**
     * Hook for subclasses to define the job's business logic (sources,
     * transformations, sinks) on the given environments.
     *
     * @param env  the configured stream execution environment
     * @param tenv the table environment bridged to {@code env}
     */
    protected abstract void run(StreamExecutionEnvironment env, StreamTableEnvironment tenv);
}
