package streamAPI.checkpoint;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.testng.annotations.Test;
import util.FlinkConstant._Flink;
import util.FlinkUtils;

/**
 * Flink checkpointing and state demos.
 *
 * <p>Each {@code @Test} method is an independent, manually-run demo:
 * <ul>
 *   <li>{@link #t0()} — checkpoints are recreated per run and deleted when the job ends.</li>
 *   <li>{@link #t1()} — externalized checkpoints retained on cancellation / restart exhaustion.</li>
 *   <li>{@link #demoWithKeyedState()} — keyed {@link ValueState} word count (the key itself
 *       need not be stored in state; it is implicit per key).</li>
 *   <li>{@link #demoWithOperateStateExactlyOnce()} — operator state with a parallel file source.</li>
 * </ul>
 */
public class DemoWithState {

	public static void main(String[] args) throws Exception {
		new DemoWithState().demoWithKeyedState();
	}

	/**
	 * Builds the socket -> map -> keyBy -> sum -> print pipeline shared by {@link #t0()}
	 * and {@link #t1()}. The mapper deliberately throws a RuntimeException when the input
	 * starts with "cy" so that task failure and restart behavior can be observed.
	 *
	 * @param env the execution environment to attach the pipeline to
	 */
	private static void buildFailingWordCountPipeline(StreamExecutionEnvironment env) {
		DataStreamSource<String> socketTextStream = env.socketTextStream("caoyuesh1", 4444);
		socketTextStream.map(new MapFunction<String, Tuple2<String, Long>>() {
			@Override
			public Tuple2<String, Long> map(String value) throws Exception {
				if (value.startsWith("cy")) {
					// Deliberate failure trigger to exercise the restart strategy.
					throw new RuntimeException("Cy来了,程序挂了！！");
				}
				return Tuple2.of(value, 1L);
			}
		}).keyBy(0).sum(1).print();
	}

	@Test(description = "任务每次重新开始都会新建一个checkpoint地址 并且在程序结束后删掉checkpoint")
	public void t0() throws Exception {
		// System.setProperty("HADOOP_USER_NAME","root");
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.enableCheckpointing(5000);
		// Restart at most 3 times, with a 2-second delay between attempts.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000));
		env.setStateBackend(new FsStateBackend(_Flink.FSSTATE_BACK_END_PATH));
		// No externalized-checkpoint config: checkpoints are discarded when the job ends.
		buildFailingWordCountPipeline(env);
		env.execute();
	}

	@Test(description = "使用checkpoint 并且任务关闭时候 不删除checkpoint" +
			"前提是关闭的情况为2种  1.在UI上点击CANCEL 2.超过配置的重启策略后")
	public void t1() throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.enableCheckpointing(5000);
		// Restart at most 3 times, with a 2-second delay between attempts.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000));
		env.setStateBackend(new FsStateBackend(_Flink.FSSTATE_BACK_END_PATH));
		// Retain checkpoints on cancellation so the job can later be resumed from them.
		env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
		buildFailingWordCountPipeline(env);
		env.execute();
	}

	@Test(description = "keyedState Learn 了解keyedState其实不用自己再保存 key了 直接用value即可")
	public void demoWithKeyedState() throws Exception {
		DataStream<String> source = FlinkUtils.createCommonKafkaStream();
		source.keyBy(new KeySelector<String, String>() {
			@Override
			public String getKey(String value) throws Exception {
				// Each distinct word is its own key; state below is scoped per key.
				return value;
			}
		}).map(new RichMapFunction<String, Tuple2<String, Long>>() {
			// Per-key running count; transient because Flink restores it via the descriptor.
			private transient ValueState<Long> valueState;

			@Override
			public void open(Configuration parameters) throws Exception {
				super.open(parameters);
				ValueStateDescriptor<Long> descriptor = new ValueStateDescriptor<>(
						"word-count",
						Types.LONG
				);
				valueState = getRuntimeContext().getState(descriptor);
			}

			@Override
			public Tuple2<String, Long> map(String inputWord) throws Exception {
				Long historyWordCount = valueState.value();
				// value() returns null before the first update for this key.
				long newCount = (historyWordCount == null ? 0L : historyWordCount) + 1L;
				valueState.update(newCount);
				return Tuple2.of(inputWord, newCount);
			}
		}).print();
		FlinkUtils.addTroubleStreamForTestError();
		FlinkUtils.execute();
	}

	@Test(description = "实现并行可持续读 文件的source 并且实现operateState 保证 ExactlyOnce" +
			"需要在路径下准备 0 1 2个文件" +
			"echo 'a' >> 0")
	public void demoWithOperateStateExactlyOnce() throws Exception {
		StreamExecutionEnvironment env = FlinkUtils.getEnv();
		env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
		// Restart at most 2 times, with a 1-second delay between attempts.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 1000));
		env.setStateBackend(new FsStateBackend(_Flink.FSSTATE_BACK_END_PATH));
		env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
		// Parallelism 2: each subtask reads the file matching its subtask index.
		env.setParallelism(2);
		env.addSource(new MyAtLatestOnceParallelFileSource("D:/test")).print();
		FlinkUtils.addTroubleStreamForTestError();
		FlinkUtils.execute();
	}
}

