package util;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import util.FlinkConstant._ENV;
import util.FlinkConstant._Flink;
import util.FlinkConstant._Kafka;

import java.util.Properties;

/**
 * @author yue.cao
 * @since 10-28-2020
 * <p>
 * 用到的命令
 * sh kafka-consumer-groups.sh --new-consumer --bootstrap-server caoyuesh4:9092 --group cyGroupId --describe
 * sh kafka-console-producer.sh --broker-list caoyuesh4:9092 --topic cy4
 * <p>
 * <p>
 * 1.9的最佳实践 关于读取配置文件等
 * https://ci.apache.org/projects/flink/flink-docs-release-1.9/dev/best_practices.html
 */
public class FlinkUtils {

	// Utility class — not instantiable.
	private FlinkUtils() {}

	// Shared execution environment used by every stream this utility creates.
	// Never reassigned, so it is declared final.
	private static final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();


	public static StreamExecutionEnvironment getEnv() {
		return env;
	}

	public static void execute() throws Exception {
		env.execute();
	}

	/**
	 * Creates a plain String stream from the default topic and group id,
	 * using only the bundled /config.properties for configuration.
	 */
	public static DataStream<String> createCommonKafkaStream() throws Exception {
		return createKafkaStream(
				ParameterTool.fromPropertiesFile(FlinkUtils.class.getResourceAsStream("/config.properties")),
				_Kafka._Topic.CY4,
				_Kafka._Group_id.COMMON,
				SimpleStringSchema.class
		);
	}

	/**
	 * Builds a Kafka-sourced stream and configures checkpointing, the restart
	 * strategy, the state backend (dev only) and externalized checkpoints on
	 * the shared environment.
	 *
	 * @param parameterTool caller-supplied parameters, merged with
	 *                      /config.properties; may be {@code null}, in which
	 *                      case only the properties file is used
	 * @param topic         Kafka topic to consume
	 * @param groupId       Kafka consumer group id
	 * @param clazz         deserialization schema class; must have an
	 *                      accessible no-arg constructor
	 * @param <T>           element type produced by the deserialization schema
	 * @return the source stream added to the shared environment
	 * @throws Exception if the properties file cannot be read or the schema
	 *                   cannot be instantiated
	 */
	public static <T> DataStream<T> createKafkaStream(
			ParameterTool parameterTool,
			String topic,
			String groupId,
			Class<? extends DeserializationSchema<T>> clazz) throws Exception {
		ParameterTool propertiesTool = ParameterTool.fromPropertiesFile(FlinkUtils.class.getResourceAsStream("/config.properties"));
		if (parameterTool == null) {
			parameterTool = propertiesTool;
		} else {
			// BUG FIX: mergeWith returns a NEW ParameterTool instead of mutating
			// the receiver; the original code discarded the merged result, so the
			// properties-file values were silently lost on this branch.
			parameterTool = parameterTool.mergeWith(propertiesTool);
		}

		// Enable exactly-once checkpointing; this also activates the restart strategy below.
		env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
		// Up to 3 restart attempts with a 1-second delay between attempts.
		// (The original comment claimed 2 attempts, but the code configures 3.)
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 1000));

		// Use the filesystem state backend only in the dev environment.
		if (_ENV.DEV.equals(parameterTool.get("env"))) {
			env.setStateBackend(new FsStateBackend(_Flink.FSSTATE_BACK_END_PATH));
		}
		// Retain externalized checkpoints even when the job is cancelled.
		env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

		// Make the merged parameters available to all operators as global job parameters.
		env.getConfig().setGlobalJobParameters(parameterTool);

		Properties prop = new Properties();
		prop.setProperty("bootstrap.servers", _Kafka._Server.BOOTSTRAP_SERVERS);
		prop.setProperty("group.id", groupId);
		// When no committed offset exists, start consuming from this position.
		prop.setProperty("auto.offset.reset", "latest");
		// Disable Kafka auto-commit — offsets are committed on checkpoints instead. Important!
		prop.setProperty("enable.auto.commit", "false");

		FlinkKafkaConsumer<T> kafkaConsumer = new FlinkKafkaConsumer<>(
				topic,
				// Replaces the deprecated Class.newInstance(), which sneaky-throws
				// any checked exception from the constructor.
				clazz.getDeclaredConstructor().newInstance(),
				prop
		);
		// Default is true; false would keep offsets only in checkpoints. Committing to
		// Kafka as well is recommended — the Kafka-side offsets are useful for monitoring.
		kafkaConsumer.setCommitOffsetsOnCheckpoints(true);
		kafkaConsumer.setStartFromLatest();
		return env.addSource(kafkaConsumer);
	}


	/**
	 * Adds a socket text stream whose map function always divides by zero,
	 * deliberately triggering runtime failures to exercise the restart strategy.
	 */
	public static void addTroubleStreamForTestError() {
		env.socketTextStream("caoyuesh1", 4444).map(new MapFunction<String, String>() {
			@Override
			public String map(String value) throws Exception {
				return (1 / 0) + "";
			}
		});
	}


}
