package com.lagou.bak;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
import org.apache.flink.util.Collector;

import javax.swing.plaf.IconUIResource;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * 求平均值
 * （1,3）（1,5）（1,7）（1,4）（1,2）
 */
/**
 * Computes a running average per key over a socket stream using Flink keyed state.
 * Sample input stream (key,value): (1,3) (1,5) (1,7) (1,4) (1,2)
 */
public class StateTest1 {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Checkpoint every 2000 ms so state (the keyed state below and the sink's operator state)
        // is snapshotted periodically.
        env.enableCheckpointing(2000);

        // Source: newline-separated "key,value" pairs from host "hdp-1", port 7777.
        DataStreamSource<String> data = env.socketTextStream("hdp-1", 7777);
        SingleOutputStreamOperator<Tuple2<Long, Long>> maped = data.map(new MapFunction<String, Tuple2<Long, Long>>() {
            @Override
            public Tuple2<Long, Long> map(String value) throws Exception {
                // Parse "key,value" into (key, value); a malformed line throws and fails the job.
                String[] split = value.split(",");
                return new Tuple2<Long, Long>(Long.valueOf(split[0]), Long.valueOf(split[1]));
            }
        });

//        DataStreamSource<Tuple2<Long, Long>> data = env.fromElements(Tuple2.of(1l, 3l), Tuple2.of(1l, 5l), Tuple2.of(1l, 7l), Tuple2.of(1l, 4l), Tuple2.of(1l, 2l));
        KeyedStream<Tuple2<Long, Long>, Long> keyed = maped.keyBy(value -> value.f0);
//        keyed.
//        keyed.print();
        /*
         * Why RichFlatMapFunction? The requirement is a flatMap call, so FlatMapFunction would be
         * the natural choice, but (looking at its source) FlatMapFunction declares only flatMap().
         * Here we need to initialize some things, and RichFlatMapFunction extends
         * AbstractRichFunction (which provides open()) while also implementing FlatMapFunction —
         * it is the feature-rich variant of FlatMapFunction (e.g. it adds open()).
         * About AbstractRichFunction: it extends RichFunction, which in turn extends Function.
         * Function is the base interface for user-defined functions (UDFs).
         * RichFunction adds two capabilities: 1) lifecycle methods for the Function,
         * 2) access to the Function's runtime context.
         * AbstractRichFunction, as the name says, is the abstract implementation of the
         * RichFunction interface, providing base functionality for concrete implementations.
         * Two topics worth digging into: 1) UDFs, 2) the runtime context.
         * UDF: the business logic implemented by the developer.
         * RuntimeContext: per-Task configuration details — each Task instance has its own
         * RuntimeContext, combining StreamExecutionEnvironment configuration with
         * operator-level information.
         */

        SingleOutputStreamOperator<Tuple2<Long, Long>> flatMaped = keyed.flatMap(new RichFlatMapFunction<Tuple2<Long, Long>, Tuple2<Long, Long>>() {
            // Keyed state holding (count, sum) for the current key; default value is (0L, 0L).
            private transient ValueState<Tuple2<Long, Long>> sum;

            @Override
            public void open(Configuration parameters) throws Exception {

//                ValueStateDescriptor<Long> count = new ValueStateDescriptor<>("count", LongSerializer.INSTANCE, 0L);
                System.out.println("...open");
                ValueStateDescriptor<Tuple2<Long, Long>> descriptor = new ValueStateDescriptor<>(
                        "average",
                        TypeInformation.of(new TypeHint<Tuple2<Long, Long>>() {
                        })
                        , Tuple2.of(0L, 0L)
                );
//                ValueStateDescriptor<Tuple2<Long, Long>> descriptor1 = new ValueStateDescriptor<>("average", TypeInformation.of(new TypeHint<Tuple2<Long, Long>>() {
//                }));
                // RuntimeContext is the Function's runtime context; it holds everything the Function
                // needs at runtime: parallelism info, task name, ExecutionConfig, State, etc.
                sum = getRuntimeContext().getState(descriptor);
//                sum.update(new Tuple2<>(0L,0L));

            }

            @Override
            public void flatMap(Tuple2<Long, Long> value, Collector<Tuple2<Long, Long>> out) throws Exception {

                // Read the current (count, sum) state for this key.
                Tuple2<Long, Long> currentSum = sum.value();

                // Accumulate: bump the count and add the incoming value.
                currentSum.f0 += 1;
                currentSum.f1 += value.f1;

                // Write the updated pair back to keyed state.
                sum.update(currentSum);

                // Once two elements have been seen, emit the average and clear the state to
                // start over. NOTE(review): the check is == 2, not >= 2 — this only works
                // because the state is cleared every time the count reaches exactly 2.
                if(currentSum.f0 == 2) {
                    out.collect(new Tuple2<>(value.f0,currentSum.f1 / currentSum.f0));
                    sum.clear();
                }
            }


        });

//        flatMaped.print();

        flatMaped.addSink(new BufferingSink(2));

        env.execute();
    }
}

/**
 * A sink that buffers incoming (key, average) pairs and "sends" them (here: prints them)
 * in batches of {@code threshold} elements.
 *
 * <p>Implements {@link CheckpointedFunction} so the in-memory buffer is persisted as
 * operator (non-keyed) list state on each checkpoint and reloaded after a failure,
 * so not-yet-flushed elements are not lost.
 */
class BufferingSink implements SinkFunction<Tuple2<Long,Long>>, CheckpointedFunction{
    // Handle to the operator list state backing the buffer. Reacquired in initializeState()
    // on every (re)start, hence transient — it is not part of the serialized function object.
    private transient ListState<Tuple2<Long, Long>> checkpointedState;
    // Local working buffer; mirrored into checkpointedState on every snapshot.
    private final List<Tuple2<Long,Long>> bufferedElements;
    // Number of buffered elements that triggers a flush.
    private final int threshold;

    public BufferingSink(int threshold) {
        this.threshold = threshold;
        this.bufferedElements = new ArrayList<Tuple2<Long,Long>>();
    }

    // Called on every checkpoint: replace the previous snapshot with the current buffer contents.
    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {
        System.out.println("...snapshotState");
        // Clear the ListState first — we always store the latest buffer state.
        checkpointedState.clear();
        // Copy every locally buffered element into the checkpoint.
        checkpointedState.addAll(bufferedElements);
    }

    /**
     * Handles both first-time initialization and restore from a checkpoint/savepoint.
     * The FunctionInitializationContext gives access to the operator state store, from which
     * we obtain the ListState "container" used to snapshot the non-keyed buffer state.
     */
    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        System.out.println(Thread.currentThread().getId() + "...initializeState");
        // The StateDescriptor carries the state name plus its type information.
        ListStateDescriptor<Tuple2<Long, Long>> descriptor = new ListStateDescriptor<>("buffered-elements", TypeInformation.of(new TypeHint<Tuple2<Long, Long>>() {
        }));
        // context.getOperatorStateStore().getListState(descriptor) uses even-split redistribution
        // on rescaling. (context.getKeyedStateStore() would give keyed state — keyed streams only.)
        checkpointedState = context.getOperatorStateStore().getListState(descriptor);
        // isRestored() returns true when recovering from a previous failure; in that case
        // reload the buffer from the checkpointed state.
        if(context.isRestored()) {
            for(Tuple2<Long,Long> element : checkpointedState.get()) {
                bufferedElements.add(element);
            }
            System.out.println("....initializeState.bufferedElements:" + bufferedElements);
        }
    }

    @Override
    public void invoke(Tuple2<Long, Long> value, Context context) throws Exception {
        System.out.println("...invoke...value:" + value);

        // Buffer the element locally.
        bufferedElements.add(value);
        // Flush once the buffer reaches the threshold. Uses >= (the original used ==): a buffer
        // restored from a checkpoint may already hold >= threshold elements, in which case an
        // exact-equality check would be skipped forever and the buffer would grow unboundedly.
        if(bufferedElements.size() >= threshold) {
            for (Tuple2<Long,Long> element : bufferedElements) {
                // Real sending logic would go here.
                System.out.println("...out:" + element);
            }
            // Clear the buffer after flushing.
            bufferedElements.clear();
        }
    }
}

/**
 * A parallel source that emits an ever-increasing sequence of longs with exactly-once
 * semantics: the current offset is checkpointed via {@link ListCheckpointed} and
 * restored on recovery, so no value is skipped or duplicated.
 */
class CounterSource extends RichParallelSourceFunction<Long> implements ListCheckpointed<Long> {

    /** Current offset for exactly-once semantics. Primitive {@code long} (the original used
     *  boxed {@code Long}) to avoid allocating a new wrapper on every increment in the
     *  tight emit loop; boxing now happens only at the collect/snapshot boundaries. */
    private long offset = 0L;

    /** Flag for job cancellation; volatile so cancel() is visible to the run() loop. */
    private volatile boolean isRunning = true;

    @Override
    public void run(SourceContext<Long> ctx) {
        final Object lock = ctx.getCheckpointLock();

        while (isRunning) {
            // Emit and increment under the checkpoint lock so the output and the state
            // update are atomic with respect to checkpoint snapshots.
            synchronized (lock) {
                ctx.collect(offset);
                offset += 1;
            }
        }
    }

    @Override
    public void cancel() {
        isRunning = false;
    }

    /** Snapshot: the entire state of this source is the single current offset. */
    @Override
    public List<Long> snapshotState(long checkpointId, long checkpointTimestamp) {
        return Collections.singletonList(offset);
    }

    /** Restore: the list may hold multiple offsets after rescaling; this keeps the last one. */
    @Override
    public void restoreState(List<Long> state) {
        for (Long s : state)
            offset = s;
    }
}


