package cn.azzhu.day06;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;

import java.util.Properties;

/**
 * Streaming word-count job that keeps a per-word running total in Flink keyed
 * {@code ValueState}, consuming comma-separated words from a Kafka topic.
 * Checkpointing is enabled in EXACTLY_ONCE mode so Kafka offsets and the keyed
 * state are snapshotted together, letting the counts survive failures/restarts.
 *
 * @author azzhu
 * @create 2020-09-19 21:20:12
 */
public class MapWithStateV2 {
    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Checkpoint every 5 seconds. EXACTLY_ONCE requires the Kafka offsets to be
        // recorded in checkpoints (not auto-committed by the consumer, see below).
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.setStateBackend(new FsStateBackend("file:///D:\\bigdata\\flink-learning\\backend"));
        // Retain externalized checkpoints when the job is cancelled so it can be
        // resumed from the last checkpoint instead of starting from scratch.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Kafka source configuration.
        final Properties props = new Properties();
        // Kafka bootstrap broker addresses. NOTE(review): the original listed
        // hadoop105:9092 twice; the redundant duplicate was removed — confirm
        // whether a third broker (e.g. hadoop107:9092) was intended instead.
        props.setProperty("bootstrap.servers", "hadoop105:9092,hadoop106:9092");
        // Consumer group id.
        props.setProperty("group.id", "activity10");
        // With no committed offset for this group, start from the earliest record.
        props.setProperty("auto.offset.reset", "earliest");
        // Disable the consumer's auto-commit; Flink manages offsets via checkpoints.
        props.setProperty("enable.auto.commit", "false");

        final FlinkKafkaConsumer<String> kafkaSource = new FlinkKafkaConsumer<>(
                "activity10",
                new SimpleStringSchema(),
                props
        );

        // Source: raw comma-separated lines read from the Kafka topic.
        final DataStream<String> lines = env.addSource(kafkaSource);

        // Split each line on commas and emit one (word, 1) pair per word.
        final SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = lines.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                final String[] words = line.split(",");
                for (String word : words) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        });

        // Key by the word (tuple field 0) so each word gets its own keyed state;
        // that state is included in checkpoints, so the running totals keep
        // accumulating correctly after a failure/restart.
        final KeyedStream<Tuple2<String, Integer>, Tuple> keyed = wordAndOne.keyBy(0);

        final SingleOutputStreamOperator<Tuple2<String, Integer>> summed = keyed.map(new RichMapFunction<Tuple2<String, Integer>, Tuple2<String, Integer>>() {
            // Per-key running total; restored automatically from the last checkpoint.
            private transient ValueState<Integer> valueState;

            @Override
            public void open(Configuration parameters) throws Exception {
                // Initialize the state handle (or restore historical state after a
                // restart) via a state descriptor.
                ValueStateDescriptor<Integer> descriptor =
                        new ValueStateDescriptor<>("wc-keyed-state", Types.INT);
                valueState = getRuntimeContext().getState(descriptor);
            }

            /**
             * Adds the incoming count to this key's running total, updates the
             * keyed state, and emits (word, newTotal).
             */
            @Override
            public Tuple2<String, Integer> map(Tuple2<String, Integer> tp) throws Exception {
                final String word = tp.f0;
                final Integer count = tp.f1;

                // A null state value means this key has no history yet; treat it
                // as 0 so both the first-seen and update paths share one code path
                // (behavior identical to the original if/else branches).
                final Integer history = valueState.value();
                final int total = (history == null ? 0 : history) + count;
                valueState.update(total);
                return Tuple2.of(word, total);
            }
        });

        summed.print();
        env.execute("MapWithState");
    }
}
