package com.shujia.flink.state;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

/**
 * Flink streaming word-count demo backed by keyed {@code ValueState}.
 *
 * <p>Reads comma-separated lines from the Kafka topic {@code lines}, splits each
 * line into words, keys the stream by word, and keeps a per-word running count in
 * {@code ValueState}. Unlike a plain in-memory collection, the state is snapshotted
 * by checkpoints to HDFS, so counts survive task failures and restarts.
 *
 * @author shujia
 */
public class Demo2ValueState {
    public static void main(String[] args) throws Exception {
        // Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        /*
         * Enable checkpointing: periodically snapshot operator state to HDFS.
         */
        // Checkpoint interval, in milliseconds (every 20 s).
        env.enableCheckpointing(20000);

        // Where checkpoint snapshots are stored.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://master:9000/flink/checkpoint");

        // Retain checkpoint data when the job is cancelled manually, so the job
        // can later be restarted from the retained checkpoint.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);


        // Build the Kafka source.
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                // Kafka broker list.
                .setBootstrapServers("master:9092,node1:9092,node2:9092")
                // Topic to consume.
                .setTopics("lines")
                // Consumer group id. Fixed from the copy-pasted "Demo1NoState":
                // each job must use its own group id, otherwise it shares committed
                // offsets with the other job.
                .setGroupId("Demo2ValueState")
                // Start reading from the latest offsets.
                .setStartingOffsets(OffsetsInitializer.latest())
                // Records are plain strings.
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // Attach the Kafka source (no event-time watermarks needed here).
        DataStream<String> lines = env
                .fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka source");

        // Split each line into individual words. The explicit Types.STRING hint is
        // required because the lambda's output type is erased at compile time.
        DataStream<String> words = lines.flatMap((line, collect) -> {
            for (String word : line.split(",")) {
                collect.collect(word);
            }
        }, Types.STRING);

        // Key the stream by the word itself so state is scoped per word.
        KeyedStream<String, String> keyBys = words.keyBy(word -> word);

        // Count occurrences of each word.
        DataStream<WordCount> wordCounts = keyBys.process(new KeyedProcessFunction<String, String, WordCount>() {
            // Per-key running count.
            // ValueState holds a single value for each key of the keyed stream.
            // Unlike an ordinary field/collection, its contents are included in
            // checkpoints (persisted to HDFS), so they survive failures.
            private ValueState<Integer> countState;

            /**
             * Called once per task before processing starts; state handles must
             * be obtained here, not in the constructor.
             */
            @Override
            public void open(Configuration parameters) {
                // Runtime context gives access to keyed state.
                RuntimeContext context = getRuntimeContext();
                // Describe the state: name + value type.
                ValueStateDescriptor<Integer> stateDescriptor = new ValueStateDescriptor<>("count", Types.INT);
                // Obtain the state handle.
                countState = context.getState(stateDescriptor);
            }

            /**
             * Invoked once per incoming record (scoped to the current key).
             */
            @Override
            public void processElement(String word,
                                       KeyedProcessFunction<String, String, WordCount>.Context ctx,
                                       Collector<WordCount> out) throws Exception {
                // Read the current count for this word; null means "never seen".
                Integer count = countState.value();
                if (count == null) {
                    count = 0;
                }

                // Increment and write back so the new value is checkpointed.
                count++;
                countState.update(count);
                // Emit the updated count downstream.
                out.collect(new WordCount(word, count));
            }
        });

        wordCounts.print();

        // Give the job an explicit name for the Flink UI.
        env.execute("Demo2ValueState");

    }

    /**
     * Result record: a word and its running count.
     *
     * <p>Declared {@code public} with a Lombok-generated no-arg constructor so it
     * satisfies Flink's POJO rules (public class, public no-arg constructor,
     * getters/setters). Without the no-arg constructor Flink silently falls back
     * to Kryo generic serialization for this type.
     */
    @Data
    @AllArgsConstructor
    @NoArgsConstructor
    public static class WordCount {

        private String word;
        private Integer count;
    }
}
