package com.atguigu.flink.chapter07.state;

import com.atguigu.flink.bean.WaterSensor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.runtime.state.storage.JobManagerCheckpointStorage;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;
import org.elasticsearch.common.MacAddressProvider;

public class Flink09_State_Backend {

    /**
     * Demonstrates configuring Flink state backends (memory, filesystem, RocksDB)
     * in both the legacy and the unified (Flink 1.13+) APIs, then runs a keyed-state
     * job: for each sensor id, emit a red alert when the water level exceeds 10 on
     * two consecutive readings.
     *
     * <p>Input lines arrive on a socket in the form {@code <id>,<ts>,<vc>},
     * e.g. {@code sensor_1,1000,15}.
     */
    public static void main(String[] args) throws Exception {
        // Checkpoints are written to HDFS as this user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        StreamExecutionEnvironment env = StreamExecutionEnvironment
                .getExecutionEnvironment()
                .setParallelism(3);

        // 1. Enable checkpointing every 2 seconds.
        env.enableCheckpointing(2000);

        // 2. Configure the state backend.
        // 2.1 Memory state backend — legacy API:
        // env.setStateBackend(new MemoryStateBackend());

        // Unified API equivalent (heap state + JobManager checkpoint storage):
        // env.setStateBackend(new HashMapStateBackend());
        // env.getCheckpointConfig().setCheckpointStorage(new JobManagerCheckpointStorage());

        // 2.2 Filesystem state backend — legacy API:
        // env.setStateBackend(new FsStateBackend("hdfs://hadoop162:8020/ck"));

        // Unified API equivalent (heap state + HDFS checkpoint storage):
        // env.setStateBackend(new HashMapStateBackend());
        // env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck1");

        // 2.3 RocksDB state backend — legacy API:
        // env.setStateBackend(new RocksDBStateBackend("hdfs://hadoop162:8020/ck2"));

        // Unified API: local RocksDB state, checkpoints persisted to HDFS.
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck3");

        env
                .socketTextStream("hadoop162", 9999)
                // Parse "<id>,<ts>,<vc>" into a WaterSensor bean.
                .map(value -> {
                    String[] datas = value.split(",");
                    // parseLong/parseInt state the intent directly; autoboxing
                    // still produces the Long/Integer the bean constructor expects.
                    return new WaterSensor(datas[0], Long.parseLong(datas[1]), Integer.parseInt(datas[2]));
                })
                .keyBy(WaterSensor::getId)
                .process(new KeyedProcessFunction<String, WaterSensor, String>() {

                    // Keyed state: the previous water level seen for this sensor id.
                    private ValueState<Integer> lastVcState;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        lastVcState = getRuntimeContext()
                                .getState(new ValueStateDescriptor<>("lastVcState", Integer.class));
                    }

                    @Override
                    public void processElement(WaterSensor value, Context ctx, Collector<String> out) throws Exception {
                        Integer lastVc = lastVcState.value();
                        // null means first record for this key — no alert possible yet;
                        // the null check short-circuits before lastVc is unboxed.
                        if (lastVc != null && value.getVc() > 10 && lastVc > 10) {
                            out.collect(ctx.getCurrentKey() + "连续两次水位超过10， 红色预警...");
                        }
                        lastVcState.update(value.getVc());
                    }
                })
                .print();

        env.execute();

    }
}
