package cn.itcast.flink.keyedstate;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichReduceFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Author itcast
 * Date 2022/1/16 9:08
 * Desc Demonstrates Flink-managed per-key state (keyed state).
 * Reads records such as "hello,1", "world,2", "hello,2" from a socket,
 * groups them by key, and reduces the values, using keyed state to hold
 * the running sum for each key.
 * Keyed state data structures:
 * 1. ValueState     - a single value per key
 * 2. ListState      - a list of values per key
 * 3. MapState       - a key-value map per key
 * 4. BroadcastState - broadcast state
 * 5. ReducingState  - aggregating state
 */
public class KeyedState {
    // SLF4J logger; private static final per convention, parameterized logging below.
    private static final Logger logger = LoggerFactory.getLogger(KeyedState.class);

    public static void main(String[] args) throws Exception {
        // 1) Create the stream execution environment; parallelism 1 keeps all
        //    keys on a single task, which makes local state easy to observe.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // 2) Enable checkpointing every 1000 ms so keyed state is snapshotted
        //    (by default into the JobManager's memory state backend).
        env.enableCheckpointing(1000);
        // 3) Socket source; each record looks like "hello,1"
        DataStreamSource<String> source = env.socketTextStream("node1", 9999);
        // 4) Split each line into (word, count)
        SingleOutputStreamOperator<Tuple2<String, Integer>> mapStream = source.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String value) throws Exception {
                String[] arrs = value.split(",");
                return Tuple2.of(arrs[0], Integer.parseInt(arrs[1]));
            }
        });
        // 5) Key by the word; the keyed state used below is scoped per key
        KeyedStream<Tuple2<String, Integer>, String> keyedStream = mapStream.keyBy(t -> t.f0);
        // 6) For comparison: sum() is itself a stateful operator that
        //    accumulates incrementally. Built but not printed — kept only to
        //    illustrate the built-in alternative to the manual state below.
        SingleOutputStreamOperator<Tuple2<String, Integer>> result = keyedStream.sum(1);
        // 7) The same aggregation implemented manually with ValueState,
        //    obtained from getRuntimeContext() (state lives in the state backend).
        SingleOutputStreamOperator<Tuple2<String, Integer>> result2 = keyedStream.reduce(new RichReduceFunction<Tuple2<String, Integer>>() {
            // Per-key running total managed by Flink. transient: the state
            // handle is runtime-only and must not be Java-serialized with
            // the (Serializable) function object.
            private transient ValueState<Tuple2<String, Integer>> reduceState;

            // 1. open(): obtain the ValueState handle once per task instance.
            @Override
            public void open(Configuration parameters) throws Exception {
                reduceState = getRuntimeContext().getState(
                        // FIX: parameterized descriptor instead of a raw type
                        new ValueStateDescriptor<>("reduceState", Types.TUPLE(Types.STRING, Types.INT))
                );
            }

            // 2. reduce(): on the first element for a key the state is null,
            //    so seed it from value1; then fold value2 into the running sum
            //    and write the result back to state.
            @Override
            public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) throws Exception {
                Tuple2<String, Integer> value = reduceState.value();
                if (value == null) {
                    // First element for this key: value1 is the initial accumulator.
                    value = value1;
                }
                Tuple2<String, Integer> updated = Tuple2.of(value.f0, value.f1 + value2.f1);
                reduceState.update(updated);
                return updated;
            }

            // 3. close(): log the last state value.
            //    FIX: guard against null — reduce() may never have run, in
            //    which case reduceState.value() is null and the original code
            //    threw a NullPointerException here.
            @Override
            public void close() throws Exception {
                Tuple2<String, Integer> last = (reduceState == null) ? null : reduceState.value();
                if (last != null) {
                    // Parameterized logging: no concatenation when the level is off.
                    logger.info("{}:{}", last.f0, last.f1);
                }
            }
        });

        // 8) Print the manually-aggregated stream
        result2.print();
        // 9) Submit the job
        env.execute();
    }
}
