package com.atguigu.gmall.realtime.app;

import com.atguigu.gmall.realtime.util.FlinkSourceUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @author Archie
 * @date 2021-10-17 15:04
 * @description
 */
/**
 * Base class for Flink streaming jobs: builds the execution environment,
 * configures checkpointing and the state backend, wires up a Kafka source
 * stream, then delegates the business logic to {@link #run}.
 *
 * @author Archie
 * @date 2021-10-17 15:04
 */
public abstract class BaseAppV1 {

	/**
	 * Business-logic hook implemented by each concrete job.
	 *
	 * @param env    the configured stream execution environment
	 * @param stream the raw Kafka records, one String per record
	 */
	protected abstract void run(StreamExecutionEnvironment env, DataStreamSource<String> stream);

	/**
	 * Generic environment setup: configures the environment, attaches the
	 * Kafka source, invokes {@link #run}, and submits the job.
	 *
	 * @param port        REST UI port for the (local) environment
	 * @param parallelism default operator parallelism
	 * @param ck          checkpoint directory name; also used as the job name
	 * @param groupId     Kafka consumer group id
	 * @param topic       Kafka topic to consume
	 * @throws IllegalStateException if job submission/execution fails
	 */
	public void init(int port, int parallelism, String ck, String groupId, String topic) {

		// 0. Execution environment.
		// FIX: the Configuration was previously built but never passed to
		// getExecutionEnvironment(), so the "rest.port" setting was silently ignored.
		Configuration configuration = new Configuration();
		configuration.setInteger("rest.port", port);
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
		env.setParallelism(parallelism);

		/*>>> Checkpoint settings <<<*/
		// 1. Exactly-once guarantee (the default); start a checkpoint every 3000 ms.
		//    (The old comment said 5000 ms, which contradicted the code.)
		env.enableCheckpointing(3000, CheckpointingMode.EXACTLY_ONCE);
		// 2. A checkpoint must complete within one minute or it is discarded.
		env.getCheckpointConfig().setCheckpointTimeout(60000);
		// 3. Retain externalized checkpoints after the job is cancelled so the
		//    job state can be restored manually later.
		env
				.getCheckpointConfig()
				.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
		// 4. State backend: heap (HashMap) state, with checkpoints stored on HDFS.
		env.setStateBackend(new HashMapStateBackend());
		env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop101:9820/gmall2021/flink/ck/" + ck);

		/*>>> Build the source stream and run the business logic <<<*/
		DataStreamSource<String> stream = env.addSource(FlinkSourceUtil.getKafkaSource(groupId, topic));
		run(env, stream);

		try {
			env.execute(ck);
		} catch (Exception e) {
			// FIX: previously the exception was only printed via printStackTrace(),
			// letting a failed submission pass silently; propagate with cause preserved.
			throw new IllegalStateException("Flink job '" + ck + "' failed to execute", e);
		}
	}
}
