package com.atguigu.gmall.realtime.app;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.HashMap;

/**
 * @author Archie
 * @date 2021-10-27 18:19
 * @description Base class for Flink SQL applications: configures the streaming
 *              environment (REST port, parallelism, checkpointing, HashMap state
 *              backend with HDFS checkpoint storage) and delegates job logic to
 *              the abstract {@code run} method.
 */
public abstract class BaseSqlApp {

	/**
	 * Implemented by subclasses with the actual SQL job logic (table
	 * registration, queries, sinks).
	 *
	 * @param tenv the table environment to define the job on
	 */
	public abstract void run(StreamTableEnvironment tenv);

	/**
	 * Builds the streaming environment, applies checkpoint and state-backend
	 * settings, invokes {@link #run(StreamTableEnvironment)}, then submits
	 * the job.
	 *
	 * @param port        local REST UI port (set via {@code rest.port})
	 * @param parallelism default parallelism for the job
	 * @param ck          checkpoint directory name; also used as the job name
	 * @throws IllegalStateException if job submission fails
	 */
	public void init(int port, int parallelism, String ck) {
		Configuration conf = new Configuration();
		conf.setInteger("rest.port", port);
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
		env.setParallelism(parallelism);
		// Checkpoint every 3 seconds with exactly-once semantics.
		env.enableCheckpointing(3000, CheckpointingMode.EXACTLY_ONCE);
		// Checkpoints not finished within one minute are discarded.
		env.getCheckpointConfig().setCheckpointTimeout(60000);
		// Keep externalized checkpoints even after the job is cancelled.
		env.getCheckpointConfig()
				.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
		env.setStateBackend(new HashMapStateBackend());
		env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop101:9820/gmall2021/flink/ck/" + ck);

		StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

		run(tenv);
		try {
			env.execute(ck);
		} catch (Exception e) {
			// Fail fast with the original cause instead of swallowing the
			// failure via printStackTrace().
			throw new IllegalStateException("Flink job '" + ck + "' failed to execute", e);
		}
	}

}
