package com.shujia.flink.state;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Flink checkpointing demo: a socket word count with the checkpoint-related
 * configuration shown (commented out) so each option can be enabled one at a time.
 *
 * <p>Feed input with: {@code nc -lk 8888} on the host "master".
 */
public class Demo2Checkpoint {
    public static void main(String[] args) throws Exception {
        // 1. Create the Flink execution environment.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // --- Checkpointing (disabled for this demo; uncomment to enable) ---

        // Trigger a checkpoint every 5000 ms.
        //env.enableCheckpointing(5000);

        // Advanced option: keep the externalized checkpoint in HDFS even when
        // the job is cancelled manually (instead of deleting it).
        //env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        /*
         * Operator state lives in the TaskManager while the job runs; each
         * checkpoint persists a snapshot of that state to HDFS.
         */
        // State backend: where operators keep their working state.
        // HashMapStateBackend keeps it on the TaskManager's JVM heap.
        //env.setStateBackend(new HashMapStateBackend());

        // Where checkpoint snapshots are stored.
        //env.getCheckpointConfig().setCheckpointStorage("hdfs://master:9000/flink/checkpoint");

        // 2. Read a stream of words from the socket (nc -lk 8888).
        final DataStream<String> lines = env.socketTextStream("master", 8888);

        // 3. Map each word to a (word, 1) pair. The explicit TypeInformation is
        // required because Java erases the Tuple2 generic parameters in lambdas.
        final DataStream<Tuple2<String, Integer>> pairs =
                lines.map(w -> Tuple2.of(w, 1), Types.TUPLE(Types.STRING, Types.INT));

        // Partition by the word (field 0) so counts are kept per word.
        final KeyedStream<Tuple2<String, Integer>, String> keyed = pairs.keyBy(pair -> pair.f0);

        // Running sum over field 1 (the count) within each key.
        final DataStream<Tuple2<String, Integer>> counts = keyed.sum(1);

        // Print the running counts to stdout.
        counts.print();

        // Launch the Flink job.
        env.execute();
    }
}
